Diffstat (limited to 'src')
-rw-r--r--  src/compat/PropertySheet.props         2
-rw-r--r--  src/compat/compat-lz4.c             2187
-rw-r--r--  src/compat/compat-lz4.h              737
-rw-r--r--  src/openvpn/errlevel.h                 1
-rw-r--r--  src/openvpn/init.c                    14
-rw-r--r--  src/openvpn/manage.c                  30
-rw-r--r--  src/openvpn/networking_iproute2.c      2
-rw-r--r--  src/openvpn/networking_sitnl.c        44
-rw-r--r--  src/openvpn/openvpn.vcxproj            4
-rw-r--r--  src/openvpn/options.c                 86
-rw-r--r--  src/openvpn/otime.h                    1
-rw-r--r--  src/openvpn/pool.c                    18
-rw-r--r--  src/openvpn/route.c                   62
-rw-r--r--  src/openvpn/socket.c                  71
-rw-r--r--  src/openvpn/socks.c                    9
-rw-r--r--  src/openvpn/ssl.c                      8
-rw-r--r--  src/openvpn/ssl_ncp.c                 18
-rw-r--r--  src/openvpn/ssl_verify.c             177
-rw-r--r--  src/openvpn/tun.c                    233
-rw-r--r--  src/openvpnmsica/dllmain.c             2
-rw-r--r--  src/openvpnmsica/openvpnmsica.c       16
-rw-r--r--  src/openvpnserv/interactive.c        141
-rw-r--r--  src/tapctl/main.c                      2
-rw-r--r--  src/tapctl/tap.c                     242
-rw-r--r--  src/tapctl/tap.h                       6
25 files changed, 2834 insertions, 1279 deletions
diff --git a/src/compat/PropertySheet.props b/src/compat/PropertySheet.props
index fdded31..4f94b97 100644
--- a/src/compat/PropertySheet.props
+++ b/src/compat/PropertySheet.props
@@ -3,7 +3,7 @@
<ImportGroup Label="PropertySheets" />
<PropertyGroup Label="UserMacros">
<SOURCEBASE>$(SolutionDir)</SOURCEBASE>
- <OPENVPN_DEPROOT>$(SOURCEBASE)\..\openvpn-build\msvc\image</OPENVPN_DEPROOT>
+ <OPENVPN_DEPROOT>$(SOURCEBASE)\..\openvpn-build\msvc\image$(PlatformArchitecture)</OPENVPN_DEPROOT>
<OPENSSL_HOME>$(OPENVPN_DEPROOT)</OPENSSL_HOME>
<TAP_WINDOWS_HOME>$(OPENVPN_DEPROOT)</TAP_WINDOWS_HOME>
<LZO_HOME>$(OPENVPN_DEPROOT)</LZO_HOME>
diff --git a/src/compat/compat-lz4.c b/src/compat/compat-lz4.c
index 723157d..26a3980 100644
--- a/src/compat/compat-lz4.c
+++ b/src/compat/compat-lz4.c
@@ -1,5 +1,5 @@
/* This file has been backported by dev-tools/lz4-rebaser.sh
- * from upstream lz4 commit 7bb64ff2b69a9f8367de (v1.7.5)
+ * from upstream lz4 commit fdf2ef5809ca875c4545 (v1.9.2)
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
@@ -10,7 +10,7 @@
#ifdef NEED_COMPAT_LZ4
/*
LZ4 - Fast LZ compression algorithm
- Copyright (C) 2011-2016, Yann Collet.
+ Copyright (C) 2011-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -42,17 +42,16 @@
- LZ4 source repository : https://github.com/lz4/lz4
*/
-
/*-************************************
* Tuning parameters
**************************************/
/*
- * HEAPMODE :
+ * LZ4_HEAPMODE :
* Select how default compression functions will allocate memory for their hash table,
* in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
*/
-#ifndef HEAPMODE
-# define HEAPMODE 0
+#ifndef LZ4_HEAPMODE
+# define LZ4_HEAPMODE 0
#endif
/*
@@ -73,16 +72,17 @@
* Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
* Method 2 : direct access. This method is portable but violate C standard.
- * It can generate buggy code on targets which generate assembly depending on alignment.
+ * It can generate buggy code on targets which assembly generation depends on alignment.
* But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
* See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/
-#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
-# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
+# if defined(__GNUC__) && \
+ ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define LZ4_FORCE_MEMORY_ACCESS 2
-# elif defined(__INTEL_COMPILER) || \
- (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
# define LZ4_FORCE_MEMORY_ACCESS 1
# endif
#endif
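An illustrative sketch (not part of the patch, assuming <string.h> and a 32-bit unsigned int) of the two access strategies this macro chooses between: method 0, the default when LZ4_FORCE_MEMORY_ACCESS is unset, reads through memcpy(); method 2 dereferences directly.

/* Method 0: portable; compilers emit a single load where unaligned access is legal. */
static unsigned read32_via_memcpy(const void* p)
{
    unsigned v;
    memcpy(&v, p, sizeof(v));
    return v;
}

/* Method 2: direct access; fast on e.g. GCC + ARMv6, but undefined behaviour
 * for unaligned pointers under the C standard. */
static unsigned read32_direct(const void* p)
{
    return *(const unsigned*)p;
}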
@@ -91,14 +91,32 @@
* LZ4_FORCE_SW_BITCOUNT
* Define this parameter if your target system or compiler does not support hardware bit count
*/
-#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
+#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
# define LZ4_FORCE_SW_BITCOUNT
#endif
+
/*-************************************
* Dependency
**************************************/
+/*
+ * LZ4_SRC_INCLUDED:
+ * Amalgamation flag, whether lz4.c is included
+ */
+#ifndef LZ4_SRC_INCLUDED
+# define LZ4_SRC_INCLUDED 1
+#endif
+
+#ifndef LZ4_STATIC_LINKING_ONLY
+#define LZ4_STATIC_LINKING_ONLY
+#endif
+
+#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
+#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
+#endif
+
+#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
#include "compat-lz4.h"
/* see also "memory routines" below */
@@ -107,42 +125,130 @@
* Compiler Options
**************************************/
#ifdef _MSC_VER /* Visual Studio */
-# define FORCE_INLINE static __forceinline
# include <intrin.h>
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
-#else
-# if defined(__GNUC__) || defined(__clang__)
-# define FORCE_INLINE static inline __attribute__((always_inline))
-# elif defined(__cplusplus) || (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# define FORCE_INLINE static inline
-# else
-# define FORCE_INLINE static
-# endif
#endif /* _MSC_VER */
+#ifndef LZ4_FORCE_INLINE
+# ifdef _MSC_VER /* Visual Studio */
+# define LZ4_FORCE_INLINE static __forceinline
+# else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define LZ4_FORCE_INLINE static inline
+# endif
+# else
+# define LZ4_FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+# endif /* _MSC_VER */
+#endif /* LZ4_FORCE_INLINE */
+
+/* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE
+ * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
+ * together with a simple 8-byte copy loop as a fall-back path.
+ * However, this optimization hurts the decompression speed by >30%,
+ * because the execution does not go to the optimized loop
+ * for typical compressible data, and all of the preamble checks
+ * before going to the fall-back path become useless overhead.
+ * This optimization happens only with the -O3 flag, and -O2 generates
+ * a simple 8-byte copy loop.
+ * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
+ * functions are annotated with __attribute__((optimize("O2"))),
+ * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
+ * of LZ4_wildCopy8 does not affect the compression speed.
+ */
+#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
+# define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2")))
+# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE __attribute__((optimize("O2"))) LZ4_FORCE_INLINE
+#else
+# define LZ4_FORCE_O2_GCC_PPC64LE
+# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static
+#endif
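An illustrative sketch (hypothetical function, not from the patch) of how this macro pair is meant to be used, pinning one hot routine to -O2 while the rest of the file may be built at -O3:

/* Expands to __attribute__((optimize("O2"))) on gcc/ppc64le, to nothing elsewhere. */
LZ4_FORCE_O2_GCC_PPC64LE
static void decode_hot_loop(unsigned char* dst, const unsigned char* src, size_t n)
{
    while (n--) { *dst++ = *src++; }   /* stand-in body for a decoder hot path */
}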
+
#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
# define expect(expr,value) (__builtin_expect ((expr),(value)) )
#else
# define expect(expr,value) (expr)
#endif
+#ifndef likely
#define likely(expr) expect((expr) != 0, 1)
+#endif
+#ifndef unlikely
#define unlikely(expr) expect((expr) != 0, 0)
+#endif
/*-************************************
* Memory routines
**************************************/
#include <stdlib.h> /* malloc, calloc, free */
-#define ALLOCATOR(n,s) calloc(n,s)
-#define FREEMEM free
+#define ALLOC(s) malloc(s)
+#define ALLOC_AND_ZERO(s) calloc(1,s)
+#define FREEMEM(p) free(p)
#include <string.h> /* memset, memcpy */
-#define MEM_INIT memset
+#define MEM_INIT(p,v,s) memset((p),(v),(s))
/*-************************************
-* Basic Types
+* Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
+#define FASTLOOP_SAFE_DISTANCE 64
+static const int LZ4_minLength = (MFLIMIT+1);
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
+# error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
+#endif
+
+#define ML_BITS 4
+#define ML_MASK ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
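For orientation, a worked example of how these constants pack an LZ4 sequence token (standard LZ4 block format, not specific to this patch): the high 4 bits carry the literal length, the low 4 bits the match length minus MINMATCH, and a nibble equal to RUN_MASK / ML_MASK means the length continues in extra 255-valued bytes.

/* Encode the token for 20 literals followed by a match of length 11
 * (matchCode = 11 - MINMATCH = 7). */
static unsigned char token_demo(void)
{
    unsigned litLength = 20;
    unsigned matchCode = 7;
    unsigned char token =
        (unsigned char)(((litLength >= RUN_MASK ? RUN_MASK : litLength) << ML_BITS)
                      |  (matchCode >= ML_MASK  ? ML_MASK  : matchCode));
    /* high nibble = 15 (RUN_MASK), so one extra length byte 20 - 15 = 5 follows;
     * low nibble = 7 needs no extension bytes.  token == 0xF7. */
    return token;
}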
+/*-************************************
+* Error detection
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+# include <assert.h>
+#else
+# ifndef assert
+# define assert(condition) ((void)0)
+# endif
+#endif
+
+#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
+# include <stdio.h>
+static int g_debuglog_enable = 1;
+# define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+#else
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
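A minimal sketch of these debug knobs (assuming the file is built with -DLZ4_DEBUG=<level>; with LZ4_DEBUG undefined both assert() and DEBUGLOG() compile to nothing):

static void debug_knobs_demo(void)
{
    LZ4_STATIC_ASSERT(sizeof(int) >= 2);   /* ok: enum value is 1/1; a false
                                              condition divides by zero and
                                              breaks the build */
    DEBUGLOG(5, "value = %d", 42);         /* printed only when LZ4_DEBUG >= 5 */
    assert(1 + 1 == 2);                    /* active only when LZ4_DEBUG >= 1  */
}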
+
+
+/*-************************************
+* Types
**************************************/
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# include <stdint.h>
@@ -167,6 +273,13 @@
typedef size_t reg_t; /* 32-bits in x32 mode */
#endif
+typedef enum {
+ notLimited = 0,
+ limitedOutput = 1,
+ fillOutput = 2
+} limitedOutput_directive;
+
+
/*-************************************
* Reading and writing into memory
**************************************/
@@ -200,7 +313,7 @@ static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArc
static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
-#else /* safe and portable access through memcpy() */
+#else /* safe and portable access using memcpy() */
static U16 LZ4_read16(const void* memPtr)
{
@@ -251,55 +364,113 @@ static void LZ4_writeLE16(void* memPtr, U16 value)
}
}
-static void LZ4_copy8(void* dst, const void* src)
-{
- memcpy(dst,src,8);
-}
-
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
-static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
+LZ4_FORCE_O2_INLINE_GCC_PPC64LE
+void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
{
BYTE* d = (BYTE*)dstPtr;
const BYTE* s = (const BYTE*)srcPtr;
BYTE* const e = (BYTE*)dstEnd;
- do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
+ do { memcpy(d,s,8); d+=8; s+=8; } while (d<e);
}
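A hedged note on the calling contract (sketch, not from the patch): the copy proceeds in whole 8-byte rounds, so up to 8 bytes beyond dstEnd may be written and callers must reserve that slack, which is what the MFLIMIT / LASTLITERALS margins above guarantee.

/* Copy `len` literal bytes; dst needs at least len + WILDCOPYLENGTH writable
 * bytes because the last round may spill past dst + len. */
static void copy_literals_demo(BYTE* dst, const BYTE* src, size_t len)
{
    LZ4_wildCopy8(dst, src, dst + len);
}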
+static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
-/*-************************************
-* Common Constants
-**************************************/
-#define MINMATCH 4
-#define WILDCOPYLENGTH 8
-#define LASTLITERALS 5
-#define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
-static const int LZ4_minLength = (MFLIMIT+1);
+#ifndef LZ4_FAST_DEC_LOOP
+# if defined(__i386__) || defined(__x86_64__)
+# define LZ4_FAST_DEC_LOOP 1
+# elif defined(__aarch64__) && !defined(__clang__)
+ /* On aarch64, we disable this optimization for clang because on certain
+ * mobile chipsets and clang, it reduces performance. For more information
+ * refer to https://github.com/lz4/lz4/pull/707. */
+# define LZ4_FAST_DEC_LOOP 1
+# else
+# define LZ4_FAST_DEC_LOOP 0
+# endif
+#endif
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
+#if LZ4_FAST_DEC_LOOP
+
+LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
+LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+ if (offset < 8) {
+ dstPtr[0] = srcPtr[0];
+ dstPtr[1] = srcPtr[1];
+ dstPtr[2] = srcPtr[2];
+ dstPtr[3] = srcPtr[3];
+ srcPtr += inc32table[offset];
+ memcpy(dstPtr+4, srcPtr, 4);
+ srcPtr -= dec64table[offset];
+ dstPtr += 8;
+ } else {
+ memcpy(dstPtr, srcPtr, 8);
+ dstPtr += 8;
+ srcPtr += 8;
+ }
-#define MAXD_LOG 16
-#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+ LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
+}
-#define ML_BITS 4
-#define ML_MASK ((1U<<ML_BITS)-1)
-#define RUN_BITS (8-ML_BITS)
-#define RUN_MASK ((1U<<RUN_BITS)-1)
+/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
+ * this version copies two times 16 bytes (instead of one time 32 bytes)
+ * because it must be compatible with offsets >= 16. */
+LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
+LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+ BYTE* d = (BYTE*)dstPtr;
+ const BYTE* s = (const BYTE*)srcPtr;
+ BYTE* const e = (BYTE*)dstEnd;
+
+ do { memcpy(d,s,16); memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
+}
+/* LZ4_memcpy_using_offset() presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 8 bytes available to write after dstEnd */
+LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
+LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+ BYTE v[8];
-/*-************************************
-* Common Utils
-**************************************/
-#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+ assert(dstEnd >= dstPtr + MINMATCH);
+ LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
+
+ switch(offset) {
+ case 1:
+ memset(v, *srcPtr, 8);
+ break;
+ case 2:
+ memcpy(v, srcPtr, 2);
+ memcpy(&v[2], srcPtr, 2);
+ memcpy(&v[4], &v[0], 4);
+ break;
+ case 4:
+ memcpy(v, srcPtr, 4);
+ memcpy(&v[4], srcPtr, 4);
+ break;
+ default:
+ LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
+ return;
+ }
+
+ memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ while (dstPtr < dstEnd) {
+ memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ }
+}
+#endif
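A worked illustration (sketch under the LZ4_FAST_DEC_LOOP path, not code from the patch) of the overlap handling above: when offset < 8 the match source overlaps the bytes being written, so the copy replicates a short pattern instead of doing one plain memcpy.

static void overlap_copy_demo(void)
{
    BYTE buf[32] = { 'A' };                 /* buf[0] = 'A', rest zero */
    BYTE* dst = buf + 1;
    /* "copy 8 bytes from 1 byte back": offset 1 builds v = "AAAAAAAA" and
     * tiles it forward, so buf now starts with nine 'A' bytes. */
    LZ4_memcpy_using_offset(dst, dst - 1, dst + 8, 1);
}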
/*-************************************
* Common functions
**************************************/
-static unsigned LZ4_NbCommonBytes (register reg_t val)
+static unsigned LZ4_NbCommonBytes (reg_t val)
{
if (LZ4_isLittleEndian()) {
if (sizeof(val)==8) {
@@ -308,9 +479,16 @@ static unsigned LZ4_NbCommonBytes (register reg_t val)
_BitScanForward64( &r, (U64)val );
return (int)(r>>3);
# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_ctzll((U64)val) >> 3);
+ return (unsigned)__builtin_ctzll((U64)val) >> 3;
# else
- static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
+ 0, 3, 1, 3, 1, 4, 2, 7,
+ 0, 2, 3, 6, 1, 5, 3, 5,
+ 1, 3, 4, 4, 2, 5, 6, 7,
+ 7, 0, 1, 2, 3, 3, 4, 6,
+ 2, 6, 5, 5, 3, 4, 5, 6,
+ 7, 1, 2, 4, 6, 4, 4, 5,
+ 7, 2, 6, 5, 7, 6, 7, 7 };
return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
# endif
} else /* 32 bits */ {
@@ -319,23 +497,29 @@ static unsigned LZ4_NbCommonBytes (register reg_t val)
_BitScanForward( &r, (U32)val );
return (int)(r>>3);
# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_ctz((U32)val) >> 3);
+ return (unsigned)__builtin_ctz((U32)val) >> 3;
# else
- static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
+ 3, 2, 2, 1, 3, 2, 0, 1,
+ 3, 3, 1, 2, 2, 2, 2, 0,
+ 3, 1, 2, 0, 1, 0, 1, 1 };
return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
# endif
}
} else /* Big Endian CPU */ {
- if (sizeof(val)==8) {
+ if (sizeof(val)==8) { /* 64-bits */
# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r = 0;
_BitScanReverse64( &r, val );
return (unsigned)(r>>3);
# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_clzll((U64)val) >> 3);
+ return (unsigned)__builtin_clzll((U64)val) >> 3;
# else
+ static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.
+ Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
+ Note that this code path is never triggered in 32-bits mode. */
unsigned r;
- if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+ if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
r += (!val);
return r;
@@ -346,7 +530,7 @@ static unsigned LZ4_NbCommonBytes (register reg_t val)
_BitScanReverse( &r, (unsigned long)val );
return (unsigned)(r>>3);
# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (__builtin_clz((U32)val) >> 3);
+ return (unsigned)__builtin_clz((U32)val) >> 3;
# else
unsigned r;
if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
@@ -358,11 +542,20 @@ static unsigned LZ4_NbCommonBytes (register reg_t val)
}
#define STEPSIZE sizeof(reg_t)
-static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
+LZ4_FORCE_INLINE
+unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
const BYTE* const pStart = pIn;
- while (likely(pIn<pInLimit-(STEPSIZE-1))) {
+ if (likely(pIn < pInLimit-(STEPSIZE-1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) {
+ pIn+=STEPSIZE; pMatch+=STEPSIZE;
+ } else {
+ return LZ4_NbCommonBytes(diff);
+ } }
+
+ while (likely(pIn < pInLimit-(STEPSIZE-1))) {
reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
pIn += LZ4_NbCommonBytes(diff);
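A worked example (illustrative, not from the patch) of the word-at-a-time comparison above: XOR the two words, then turn the count of trailing zero bits into a byte count.

/* Little-endian 64-bit: "abcdefXY" and "abcdefZW" first differ in byte 6,
 * so the XOR's lowest set bit lies in byte 6 (bits 48..55) and
 * LZ4_NbCommonBytes() returns 6. */
static unsigned count_demo(void)
{
    static const BYTE a[16] = "abcdefXY";
    static const BYTE b[16] = "abcdefZW";
    return LZ4_count(a, b, a + 8);          /* == 6 matching bytes */
}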
@@ -387,15 +580,34 @@ static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression ru
/*-************************************
* Local Structures and types
**************************************/
-typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
-typedef enum { byPtr, byU32, byU16 } tableType_t;
-
-typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
+
+/**
+ * This enum distinguishes several different modes of accessing previous
+ * content in the stream.
+ *
+ * - noDict : There is no preceding content.
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
+ * blob being compressed are valid and refer to the preceding
+ * content (of length ctx->dictSize), which is available
+ * contiguously preceding in memory the content currently
+ * being compressed.
+ * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere
+ * else in memory, starting at ctx->dictionary with length
+ * ctx->dictSize.
+ * - usingDictCtx : Like usingExtDict, but everything concerning the preceding
+ * content is in a separate context, pointed to by
+ * ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
+ * entries in the current context that refer to positions
+ * preceding the beginning of the current compression are
+ * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
+ * ->dictSize describe the location and size of the preceding
+ * content, and matches are found by looking in the ctx
+ * ->dictCtx->hashTable.
+ */
+typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
-typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
-typedef enum { full = 0, partial = 1 } earlyEnd_directive;
-
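For orientation, a hedged sketch using the public streaming API (not code from this patch) of how these modes arise in practice: loading an external dictionary, then compressing linked blocks with LZ4_compress_fast_continue.

static void linked_blocks_demo(const char* dict, int dictSize,
                               const char* block1, int len1,
                               const char* block2, int len2,
                               char* out, int outCapacity)
{
    LZ4_stream_t ls;
    int c1, c2;
    LZ4_initStream(&ls, sizeof(ls));
    LZ4_loadDict(&ls, dict, dictSize);          /* dictionary lives elsewhere: extDict-style */
    c1 = LZ4_compress_fast_continue(&ls, block1, out, len1, outCapacity, 1);
    /* block2 may reference block1 (prefix/extDict) as well as the dictionary */
    c2 = LZ4_compress_fast_continue(&ls, block2, out + c1, len2, outCapacity - c1, 1);
    (void)c2;
}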
/*-************************************
* Local Utils
@@ -406,6 +618,23 @@ int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
+/*-************************************
+* Internal Definitions used in Tests
+**************************************/
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
+
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
/*-******************************
* Compression functions
********************************/
@@ -419,102 +648,225 @@ static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
- static const U64 prime5bytes = 889523592379ULL;
- static const U64 prime8bytes = 11400714785074694791ULL;
const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
- if (LZ4_isLittleEndian())
+ if (LZ4_isLittleEndian()) {
+ const U64 prime5bytes = 889523592379ULL;
return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
- else
+ } else {
+ const U64 prime8bytes = 11400714785074694791ULL;
return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
+ }
}
-FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
+LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
return LZ4_hash4(LZ4_read32(p), tableType);
}
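A worked note on the multiplicative hash above (not from the patch): the multiply diffuses the five significant input bytes into the high bits and the shift keeps exactly hashLog of them, so the result always fits the hash table.

/* byU32 path, little-endian, default LZ4_MEMORY_USAGE (14) => LZ4_HASHLOG == 12:
 * ((sequence << 24) * 889523592379ULL) >> (64 - 12) is always < 4096,
 * i.e. a valid index into the 4096-entry U32 hash table. */
static U32 hash5_demo(U64 sequence)
{
    return (U32)(((sequence << 24) * 889523592379ULL) >> (64 - LZ4_HASHLOG));
}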
-static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
+static void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
{
switch (tableType)
{
+ default: /* fallthrough */
+ case clearedTable: { /* illegal! */ assert(0); return; }
+ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
+ }
+}
+
+static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
+{
+ switch (tableType)
+ {
+ default: /* fallthrough */
+ case clearedTable: /* fallthrough */
+ case byPtr: { /* illegal! */ assert(0); return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
+ }
+}
+
+static void LZ4_putPositionOnHash(const BYTE* p, U32 h,
+ void* tableBase, tableType_t const tableType,
+ const BYTE* srcBase)
+{
+ switch (tableType)
+ {
+ case clearedTable: { /* illegal! */ assert(0); return; }
case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
}
}
-FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
U32 const h = LZ4_hashPosition(p, tableType);
LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}
-static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+/* LZ4_getIndexOnHash() :
+ * Index of match position registered in hash table.
+ * hash position must be calculated by using base+index, or dictBase+index.
+ * Assumption 1 : only valid if tableType == byU32 or byU16.
+ * Assumption 2 : h is presumed valid (within limits of hash table)
+ */
+static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
+{
+ LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+ if (tableType == byU32) {
+ const U32* const hashTable = (const U32*) tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
+ return hashTable[h];
+ }
+ if (tableType == byU16) {
+ const U16* const hashTable = (const U16*) tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
+ return hashTable[h];
+ }
+ assert(0); return 0; /* forbidden case */
+}
+
+static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
- if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
- if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
- { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
+ if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
+ if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
+ { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
}
-FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+LZ4_FORCE_INLINE const BYTE*
+LZ4_getPosition(const BYTE* p,
+ const void* tableBase, tableType_t tableType,
+ const BYTE* srcBase)
{
U32 const h = LZ4_hashPosition(p, tableType);
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
+LZ4_FORCE_INLINE void
+LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
+ const int inputSize,
+ const tableType_t tableType) {
+ /* If compression failed during the previous step, then the context
+ * is marked as dirty, therefore, it has to be fully reset.
+ */
+ if (cctx->dirty) {
+ DEBUGLOG(5, "LZ4_prepareTable: Full reset for %p", cctx);
+ MEM_INIT(cctx, 0, sizeof(LZ4_stream_t_internal));
+ return;
+ }
+
+ /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
+ * therefore safe to use no matter what mode we're in. Otherwise, we figure
+ * out if it's safe to leave as is or whether it needs to be reset.
+ */
+ if (cctx->tableType != clearedTable) {
+ assert(inputSize >= 0);
+ if (cctx->tableType != tableType
+ || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
+ || ((tableType == byU32) && cctx->currentOffset > 1 GB)
+ || tableType == byPtr
+ || inputSize >= 4 KB)
+ {
+ DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
+ MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
+ cctx->currentOffset = 0;
+ cctx->tableType = clearedTable;
+ } else {
+ DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
+ }
+ }
+
+ /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, is faster
+ * than compressing without a gap. However, compressing with
+ * currentOffset == 0 is faster still, so we preserve that case.
+ */
+ if (cctx->currentOffset != 0 && tableType == byU32) {
+ DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
+ cctx->currentOffset += 64 KB;
+ }
+
+ /* Finally, clear history */
+ cctx->dictCtx = NULL;
+ cctx->dictionary = NULL;
+ cctx->dictSize = 0;
+}
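A short worked example of the 64 KB gap above (numbers chosen purely for illustration):

/* Suppose the previous block ended at currentOffset == 100000 and left a
 * stale hash entry for index 99990.  After the bump, the new block starts
 * at index 100000 + 65536 = 165536, so for every new position `current`:
 *     99990 + LZ4_DISTANCE_MAX (65535) = 165525 < 165536 <= current
 * and the stale entry is rejected by the "too far" check in
 * LZ4_compress_generic without clearing the whole table. */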
/** LZ4_compress_generic() :
inlined, to ensure branches are decided at compilation time */
-FORCE_INLINE int LZ4_compress_generic(
+LZ4_FORCE_INLINE int LZ4_compress_generic(
LZ4_stream_t_internal* const cctx,
const char* const source,
char* const dest,
const int inputSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
const int maxOutputSize,
- const limitedOutput_directive outputLimited,
+ const limitedOutput_directive outputDirective,
const tableType_t tableType,
- const dict_directive dict,
+ const dict_directive dictDirective,
const dictIssue_directive dictIssue,
- const U32 acceleration)
+ const int acceleration)
{
+ int result;
const BYTE* ip = (const BYTE*) source;
- const BYTE* base;
+
+ U32 const startIndex = cctx->currentOffset;
+ const BYTE* base = (const BYTE*) source - startIndex;
const BYTE* lowLimit;
- const BYTE* const lowRefLimit = ip - cctx->dictSize;
- const BYTE* const dictionary = cctx->dictionary;
- const BYTE* const dictEnd = dictionary + cctx->dictSize;
- const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
+
+ const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
+ const BYTE* const dictionary =
+ dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
+ const U32 dictSize =
+ dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
+ const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */
+
+ int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
+ U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
+ const BYTE* const dictEnd = dictionary + dictSize;
const BYTE* anchor = (const BYTE*) source;
const BYTE* const iend = ip + inputSize;
- const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
const BYTE* const matchlimit = iend - LASTLITERALS;
+ /* the dictCtx currentOffset is indexed on the start of the dictionary,
+ * while a dictionary in the current context precedes the currentOffset */
+ const BYTE* dictBase = (dictDirective == usingDictCtx) ?
+ dictionary + dictSize - dictCtx->currentOffset :
+ dictionary + dictSize - startIndex;
+
BYTE* op = (BYTE*) dest;
BYTE* const olimit = op + maxOutputSize;
+ U32 offset = 0;
U32 forwardH;
- /* Init conditions */
- if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported inputSize, too large (or negative) */
- switch(dict)
- {
- case noDict:
- default:
- base = (const BYTE*)source;
- lowLimit = (const BYTE*)source;
- break;
- case withPrefix64k:
- base = (const BYTE*)source - cctx->currentOffset;
- lowLimit = (const BYTE*)source - cctx->dictSize;
- break;
- case usingExtDict:
- base = (const BYTE*)source - cctx->currentOffset;
- lowLimit = (const BYTE*)source;
- break;
+ DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType);
+ /* If init conditions are not met, we don't have to mark stream
+ * as having dirty context, since no action was taken yet */
+ if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
+ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported inputSize, too large (or negative) */
+ if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
+ if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
+ assert(acceleration >= 1);
+
+ lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
+
+ /* Update context state */
+ if (dictDirective == usingDictCtx) {
+ /* Subsequent linked blocks can't use the dictionary. */
+ /* Instead, they use the block we just compressed. */
+ cctx->dictCtx = NULL;
+ cctx->dictSize = (U32)inputSize;
+ } else {
+ cctx->dictSize += (U32)inputSize;
}
- if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
- if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+ cctx->currentOffset += (U32)inputSize;
+ cctx->tableType = (U16)tableType;
+
+ if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
/* First Byte */
LZ4_putPosition(ip, cctx->hashTable, tableType, base);
@@ -522,50 +874,112 @@ FORCE_INLINE int LZ4_compress_generic(
/* Main Loop */
for ( ; ; ) {
- ptrdiff_t refDelta = 0;
const BYTE* match;
BYTE* token;
+ const BYTE* filledIp;
/* Find a match */
- { const BYTE* forwardIp = ip;
- unsigned step = 1;
- unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
+ if (tableType == byPtr) {
+ const BYTE* forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
do {
U32 const h = forwardH;
ip = forwardIp;
forwardIp += step;
step = (searchMatchNb++ >> LZ4_skipTrigger);
- if (unlikely(forwardIp > mflimit)) goto _last_literals;
+ if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+ assert(ip < mflimitPlusOne);
match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
- if (dict==usingExtDict) {
- if (match < (const BYTE*)source) {
- refDelta = dictDelta;
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+
+ } while ( (match+LZ4_DISTANCE_MAX < ip)
+ || (LZ4_read32(match) != LZ4_read32(ip)) );
+
+ } else { /* byU32, byU16 */
+
+ const BYTE* forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ U32 const current = (U32)(forwardIp - base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ assert(matchIndex <= current);
+ assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ assert(tableType == byU32);
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ matchIndex += dictDelta; /* make dictCtx index comparable with current context */
lowLimit = dictionary;
} else {
- refDelta = 0;
+ match = base + matchIndex;
lowLimit = (const BYTE*)source;
- } }
+ }
+ } else if (dictDirective==usingExtDict) {
+ if (matchIndex < startIndex) {
+ DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
+ assert(startIndex - matchIndex >= MINMATCH);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source;
+ }
+ } else { /* single continuous memory segment */
+ match = base + matchIndex;
+ }
forwardH = LZ4_hashPosition(forwardIp, tableType);
- LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+
+ DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex);
+ if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */
+ assert(matchIndex < current);
+ if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
+ && (matchIndex+LZ4_DISTANCE_MAX < current)) {
+ continue;
+ } /* too far */
+ assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */
+
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ if (maybe_extMem) offset = current - matchIndex;
+ break; /* match found */
+ }
- } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
- || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
- || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
+ } while(1);
}
/* Catch up */
- while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
+ filledIp = ip;
+ while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
/* Encode Literals */
{ unsigned const litLength = (unsigned)(ip - anchor);
token = op++;
- if ((outputLimited) && /* Check output buffer overflow */
- (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
- return 0;
+ if ((outputDirective == limitedOutput) && /* Check output buffer overflow */
+ (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ if ((outputDirective == fillOutput) &&
+ (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
+ op--;
+ goto _last_literals;
+ }
if (litLength >= RUN_MASK) {
- int len = (int)litLength-RUN_MASK;
+ int len = (int)(litLength - RUN_MASK);
*token = (RUN_MASK<<ML_BITS);
for(; len >= 255 ; len-=255) *op++ = 255;
*op++ = (BYTE)len;
@@ -573,82 +987,183 @@ FORCE_INLINE int LZ4_compress_generic(
else *token = (BYTE)(litLength<<ML_BITS);
/* Copy Literals */
- LZ4_wildCopy(op, anchor, op+litLength);
+ LZ4_wildCopy8(op, anchor, op+litLength);
op+=litLength;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
}
_next_match:
+ /* at this stage, the following variables must be correctly set :
+ * - ip : at start of LZ operation
+ * - match : at start of previous pattern occurence; can be within current prefix, or within extDict
+ * - offset : if maybe_ext_memSegment==1 (constant)
+ * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+ * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+ */
+
+ if ((outputDirective == fillOutput) &&
+ (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
+ /* the match was too close to the end, rewind and go to last literals */
+ op = token;
+ goto _last_literals;
+ }
+
/* Encode Offset */
- LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
+ if (maybe_extMem) { /* static test */
+ DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
+ assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+ LZ4_writeLE16(op, (U16)offset); op+=2;
+ } else {
+ DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
+ assert(ip-match <= LZ4_DISTANCE_MAX);
+ LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+ }
/* Encode MatchLength */
{ unsigned matchCode;
- if ((dict==usingExtDict) && (lowLimit==dictionary)) {
- const BYTE* limit;
- match += refDelta;
- limit = ip + (dictEnd-match);
+ if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
+ && (lowLimit==dictionary) /* match within extDict */ ) {
+ const BYTE* limit = ip + (dictEnd-match);
+ assert(dictEnd > match);
if (limit > matchlimit) limit = matchlimit;
matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
- ip += MINMATCH + matchCode;
+ ip += (size_t)matchCode + MINMATCH;
if (ip==limit) {
- unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
+ unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
matchCode += more;
ip += more;
}
+ DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
} else {
matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
- ip += MINMATCH + matchCode;
+ ip += (size_t)matchCode + MINMATCH;
+ DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
}
- if ( outputLimited && /* Check output buffer overflow */
- (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
- return 0;
+ if ((outputDirective) && /* Check output buffer overflow */
+ (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
+ if (outputDirective == fillOutput) {
+ /* Match description too long : reduce it */
+ U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
+ ip -= matchCode - newMatchCode;
+ assert(newMatchCode < matchCode);
+ matchCode = newMatchCode;
+ if (unlikely(ip <= filledIp)) {
+ /* We have already filled up to filledIp so if ip ends up less than filledIp
+ * we have positions in the hash table beyond the current position. This is
+ * a problem if we reuse the hash table. So we have to remove these positions
+ * from the hash table.
+ */
+ const BYTE* ptr;
+ DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
+ for (ptr = ip; ptr <= filledIp; ++ptr) {
+ U32 const h = LZ4_hashPosition(ptr, tableType);
+ LZ4_clearHash(h, cctx->hashTable, tableType);
+ }
+ }
+ } else {
+ assert(outputDirective == limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
if (matchCode >= ML_MASK) {
*token += ML_MASK;
matchCode -= ML_MASK;
LZ4_write32(op, 0xFFFFFFFF);
- while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255;
+ while (matchCode >= 4*255) {
+ op+=4;
+ LZ4_write32(op, 0xFFFFFFFF);
+ matchCode -= 4*255;
+ }
op += matchCode / 255;
*op++ = (BYTE)(matchCode % 255);
} else
*token += (BYTE)(matchCode);
}
+ /* Ensure we have enough space for the last literals. */
+ assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
anchor = ip;
/* Test end of chunk */
- if (ip > mflimit) break;
+ if (ip >= mflimitPlusOne) break;
/* Fill table */
LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
/* Test next position */
- match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
- if (dict==usingExtDict) {
- if (match < (const BYTE*)source) {
- refDelta = dictDelta;
- lowLimit = dictionary;
- } else {
- refDelta = 0;
- lowLimit = (const BYTE*)source;
- } }
- LZ4_putPosition(ip, cctx->hashTable, tableType, base);
- if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
- && (match+MAX_DISTANCE>=ip)
- && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
- { token=op++; *token=0; goto _next_match; }
+ if (tableType == byPtr) {
+
+ match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ if ( (match+LZ4_DISTANCE_MAX >= ip)
+ && (LZ4_read32(match) == LZ4_read32(ip)) )
+ { token=op++; *token=0; goto _next_match; }
+
+ } else { /* byU32, byU16 */
+
+ U32 const h = LZ4_hashPosition(ip, tableType);
+ U32 const current = (U32)(ip-base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary; /* required for match length counter */
+ matchIndex += dictDelta;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source; /* required for match length counter */
+ }
+ } else if (dictDirective==usingExtDict) {
+ if (matchIndex < startIndex) {
+ match = dictBase + matchIndex;
+ lowLimit = dictionary; /* required for match length counter */
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source; /* required for match length counter */
+ }
+ } else { /* single memory segment */
+ match = base + matchIndex;
+ }
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
+ if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
+ && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
+ && (LZ4_read32(match) == LZ4_read32(ip)) ) {
+ token=op++;
+ *token=0;
+ if (maybe_extMem) offset = current - matchIndex;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
+ goto _next_match;
+ }
+ }
/* Prepare next loop */
forwardH = LZ4_hashPosition(++ip, tableType);
+
}
_last_literals:
/* Encode Last Literals */
- { size_t const lastRun = (size_t)(iend - anchor);
- if ( (outputLimited) && /* Check output buffer overflow */
- ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
- return 0;
+ { size_t lastRun = (size_t)(iend - anchor);
+ if ( (outputDirective) && /* Check output buffer overflow */
+ (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
+ if (outputDirective == fillOutput) {
+ /* adapt lastRun to fill 'dst' */
+ assert(olimit >= op);
+ lastRun = (size_t)(olimit-op) - 1;
+ lastRun -= (lastRun+240)/255;
+ } else {
+ assert(outputDirective == limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
if (lastRun >= RUN_MASK) {
size_t accumulator = lastRun - RUN_MASK;
*op++ = RUN_MASK << ML_BITS;
@@ -658,251 +1173,154 @@ _last_literals:
*op++ = (BYTE)(lastRun<<ML_BITS);
}
memcpy(op, anchor, lastRun);
+ ip = anchor + lastRun;
op += lastRun;
}
- /* End */
- return (int) (((char*)op)-dest);
+ if (outputDirective == fillOutput) {
+ *inputConsumed = (int) (((const char*)ip)-source);
+ }
+ DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, (int)(((char*)op) - dest));
+ result = (int)(((char*)op) - dest);
+ assert(result > 0);
+ return result;
}
int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
+ LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
+ assert(ctx != NULL);
+ if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+ if (maxOutputSize >= LZ4_compressBound(inputSize)) {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ }
+}
+
+/**
+ * LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
+ * "correctly initialized").
+ */
+int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
+{
LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
- LZ4_resetStream((LZ4_stream_t*)state);
if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
- if (maxOutputSize >= LZ4_compressBound(inputSize)) {
- if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
- else
- return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+ if (dstCapacity >= LZ4_compressBound(srcSize)) {
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
} else {
- if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
- else
- return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
}
}
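A hedged usage sketch of the fast-reset pattern described above (not part of the patch): pay the full initialization once, then reuse the state cheaply for many independent compressions.

static void reuse_state_demo(const char* src, int srcSize, char* dst, int dstCapacity)
{
    LZ4_stream_t state;
    int i;
    LZ4_initStream(&state, sizeof(state));     /* expensive reset, done once */
    for (i = 0; i < 100; i++) {
        /* subsequent calls skip the big memset; safe only because the state
         * remains correctly initialized between calls */
        (void)LZ4_compress_fast_extState_fastReset(&state, src, dst, srcSize, dstCapacity, 1);
    }
}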
int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
-#if (HEAPMODE)
- void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ int result;
+#if (LZ4_HEAPMODE)
+ LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ if (ctxPtr == NULL) return 0;
#else
LZ4_stream_t ctx;
- void* const ctxPtr = &ctx;
+ LZ4_stream_t* const ctxPtr = &ctx;
#endif
+ result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
- int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
-
-#if (HEAPMODE)
+#if (LZ4_HEAPMODE)
FREEMEM(ctxPtr);
#endif
return result;
}
-int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
+int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
{
- return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
+ return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
}
/* hidden debug function */
/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
-int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+int LZ4_compress_fast_force(const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
LZ4_stream_t ctx;
- LZ4_resetStream(&ctx);
-
- if (inputSize < LZ4_64Klimit)
- return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
- else
- return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
-}
-
-
-/*-******************************
-* *_destSize() variant
-********************************/
+ LZ4_initStream(&ctx, sizeof(ctx));
-static int LZ4_compress_destSize_generic(
- LZ4_stream_t_internal* const ctx,
- const char* const src,
- char* const dst,
- int* const srcSizePtr,
- const int targetDstSize,
- const tableType_t tableType)
-{
- const BYTE* ip = (const BYTE*) src;
- const BYTE* base = (const BYTE*) src;
- const BYTE* lowLimit = (const BYTE*) src;
- const BYTE* anchor = ip;
- const BYTE* const iend = ip + *srcSizePtr;
- const BYTE* const mflimit = iend - MFLIMIT;
- const BYTE* const matchlimit = iend - LASTLITERALS;
-
- BYTE* op = (BYTE*) dst;
- BYTE* const oend = op + targetDstSize;
- BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
- BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
- BYTE* const oMaxSeq = oMaxLit - 1 /* token */;
-
- U32 forwardH;
-
-
- /* Init conditions */
- if (targetDstSize < 1) return 0; /* Impossible to store anything */
- if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
- if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
- if (*srcSizePtr<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
-
- /* First Byte */
- *srcSizePtr = 0;
- LZ4_putPosition(ip, ctx->hashTable, tableType, base);
- ip++; forwardH = LZ4_hashPosition(ip, tableType);
-
- /* Main Loop */
- for ( ; ; ) {
- const BYTE* match;
- BYTE* token;
-
- /* Find a match */
- { const BYTE* forwardIp = ip;
- unsigned step = 1;
- unsigned searchMatchNb = 1 << LZ4_skipTrigger;
-
- do {
- U32 h = forwardH;
- ip = forwardIp;
- forwardIp += step;
- step = (searchMatchNb++ >> LZ4_skipTrigger);
-
- if (unlikely(forwardIp > mflimit)) goto _last_literals;
-
- match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
- forwardH = LZ4_hashPosition(forwardIp, tableType);
- LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);
-
- } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
- || (LZ4_read32(match) != LZ4_read32(ip)) );
- }
-
- /* Catch up */
- while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
-
- /* Encode Literal length */
- { unsigned litLength = (unsigned)(ip - anchor);
- token = op++;
- if (op + ((litLength+240)/255) + litLength > oMaxLit) {
- /* Not enough space for a last match */
- op--;
- goto _last_literals;
- }
- if (litLength>=RUN_MASK) {
- unsigned len = litLength - RUN_MASK;
- *token=(RUN_MASK<<ML_BITS);
- for(; len >= 255 ; len-=255) *op++ = 255;
- *op++ = (BYTE)len;
- }
- else *token = (BYTE)(litLength<<ML_BITS);
-
- /* Copy Literals */
- LZ4_wildCopy(op, anchor, op+litLength);
- op += litLength;
- }
-
-_next_match:
- /* Encode Offset */
- LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
-
- /* Encode MatchLength */
- { size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
-
- if (op + ((matchLength+240)/255) > oMaxMatch) {
- /* Match description too long : reduce it */
- matchLength = (15-1) + (oMaxMatch-op) * 255;
- }
- ip += MINMATCH + matchLength;
-
- if (matchLength>=ML_MASK) {
- *token += ML_MASK;
- matchLength -= ML_MASK;
- while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
- *op++ = (BYTE)matchLength;
- }
- else *token += (BYTE)(matchLength);
- }
-
- anchor = ip;
-
- /* Test end of block */
- if (ip > mflimit) break;
- if (op > oMaxSeq) break;
-
- /* Fill table */
- LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);
-
- /* Test next position */
- match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
- LZ4_putPosition(ip, ctx->hashTable, tableType, base);
- if ( (match+MAX_DISTANCE>=ip)
- && (LZ4_read32(match)==LZ4_read32(ip)) )
- { token=op++; *token=0; goto _next_match; }
-
- /* Prepare next loop */
- forwardH = LZ4_hashPosition(++ip, tableType);
- }
-
-_last_literals:
- /* Encode Last Literals */
- { size_t lastRunSize = (size_t)(iend - anchor);
- if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
- /* adapt lastRunSize to fill 'dst' */
- lastRunSize = (oend-op) - 1;
- lastRunSize -= (lastRunSize+240)/255;
- }
- ip = anchor + lastRunSize;
-
- if (lastRunSize >= RUN_MASK) {
- size_t accumulator = lastRunSize - RUN_MASK;
- *op++ = RUN_MASK << ML_BITS;
- for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
- *op++ = (BYTE) accumulator;
- } else {
- *op++ = (BYTE)(lastRunSize<<ML_BITS);
- }
- memcpy(op, anchor, lastRunSize);
- op += lastRunSize;
+ if (srcSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(&ctx.internal_donotuse, src, dst, srcSize, NULL, dstCapacity, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ tableType_t const addrMode = (sizeof(void*) > 4) ? byU32 : byPtr;
+ return LZ4_compress_generic(&ctx.internal_donotuse, src, dst, srcSize, NULL, dstCapacity, limitedOutput, addrMode, noDict, noDictIssue, acceleration);
}
-
- /* End */
- *srcSizePtr = (int) (((const char*)ip)-src);
- return (int) (((char*)op)-dst);
}
+/* Note!: This function leaves the stream in an unclean/broken state!
+ * It is not safe to subsequently use the same state with a _fastReset() or
+ * _continue() call without resetting it. */
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
- LZ4_resetStream(state);
+ void* const s = LZ4_initStream(state, sizeof (*state));
+ assert(s != NULL); (void)s;
if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
} else {
- if (*srcSizePtr < LZ4_64Klimit)
- return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
- else
- return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr);
- }
+ if (*srcSizePtr < LZ4_64Klimit) {
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
+ } else {
+ tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
+ } }
}
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
-#if (HEAPMODE)
- LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+#if (LZ4_HEAPMODE)
+ LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ if (ctx == NULL) return 0;
#else
LZ4_stream_t ctxBody;
LZ4_stream_t* ctx = &ctxBody;
@@ -910,7 +1328,7 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe
int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
-#if (HEAPMODE)
+#if (LZ4_HEAPMODE)
FREEMEM(ctx);
#endif
return result;
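
For orientation, a minimal usage sketch of the destSize API updated above: the caller passes the number of source bytes available in *srcSizePtr, and on return it holds how many of them were actually consumed to fill the fixed-size output. Illustrative only, not a hunk of this diff; the compat-lz4.h include name and the fill_packet() wrapper are assumptions.

    #include <stdio.h>
    #include "compat-lz4.h"

    /* Compress as much of `src` as fits into a packet of `packetCap` bytes. */
    static int fill_packet(const char *src, int srcLen, char *packet, int packetCap)
    {
        int consumed = srcLen;  /* in: bytes available, out: bytes actually consumed */
        int written = LZ4_compress_destSize(src, packet, &consumed, packetCap);
        if (written <= 0)
            return -1;          /* nothing could be stored */
        printf("consumed %d of %d input bytes -> %d compressed bytes\n",
               consumed, srcLen, written);
        return written;
    }
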
@@ -924,19 +1342,54 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe
LZ4_stream_t* LZ4_createStream(void)
{
- LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
+ LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
- LZ4_resetStream(lz4s);
+ DEBUGLOG(4, "LZ4_createStream %p", lz4s);
+ if (lz4s == NULL) return NULL;
+ LZ4_initStream(lz4s, sizeof(*lz4s));
return lz4s;
}
+#ifndef _MSC_VER /* for some reason, Visual fails the alignment test on 32-bit x86 :
+ it reports an alignment of 8-bytes,
+ while actually aligning LZ4_stream_t on 4 bytes. */
+static size_t LZ4_stream_t_alignment(void)
+{
+ struct { char c; LZ4_stream_t t; } t_a;
+ return sizeof(t_a) - sizeof(t_a.t);
+}
+#endif
+
+LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
+{
+ DEBUGLOG(5, "LZ4_initStream");
+ if (buffer == NULL) { return NULL; }
+ if (size < sizeof(LZ4_stream_t)) { return NULL; }
+#ifndef _MSC_VER /* for some reason, Visual fails the alignment test on 32-bit x86 :
+ it reports an alignment of 8-bytes,
+ while actually aligning LZ4_stream_t on 4 bytes. */
+ if (((size_t)buffer) & (LZ4_stream_t_alignment() - 1)) { return NULL; } /* alignment check */
+#endif
+ MEM_INIT(buffer, 0, sizeof(LZ4_stream_t));
+ return (LZ4_stream_t*)buffer;
+}
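
To illustrate the new initialization path (a sketch, not part of this diff): LZ4_initStream() lets a caller drive the compressor from caller-owned memory such as a stack variable, and returns NULL when the buffer is unusable. The compress_once() wrapper is a hypothetical example.

    /* Compress one buffer using caller-owned state, without heap allocation. */
    static int compress_once(const char *src, char *dst, int srcSize, int dstCapacity)
    {
        LZ4_stream_t state;
        if (LZ4_initStream(&state, sizeof(state)) == NULL)
            return -1;   /* NULL buffer, too small, or misaligned */
        return LZ4_compress_fast_extState(&state, src, dst, srcSize, dstCapacity, 1);
    }
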
+
+/* resetStream is now deprecated,
+ * prefer initStream() which is more general */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
+ DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}
+void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
+ LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
+}
+
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
+ if (!LZ4_stream) return 0; /* support free on NULL */
+ DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
FREEMEM(LZ4_stream);
return (0);
}
@@ -946,43 +1399,88 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
+ const tableType_t tableType = byU32;
const BYTE* p = (const BYTE*)dictionary;
const BYTE* const dictEnd = p + dictSize;
const BYTE* base;
- if ((dict->initCheck) || (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */
- LZ4_resetStream(LZ4_dict);
+ DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
+
+ /* It's necessary to reset the context,
+ * and not just continue it with prepareTable()
+ * to avoid any risk of generating overflowing matchIndex
+ * when compressing using this dictionary */
+ LZ4_resetStream(LZ4_dict);
+
+ /* We always increment the offset by 64 KB, since, if the dict is longer,
+ * we truncate it to the last 64k, and if it's shorter, we still want to
+ * advance by a whole window length so we can provide the guarantee that
+ * there are only valid offsets in the window, which allows an optimization
+ * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
+ * dictionary isn't a full 64k. */
+ dict->currentOffset += 64 KB;
if (dictSize < (int)HASH_UNIT) {
- dict->dictionary = NULL;
- dict->dictSize = 0;
return 0;
}
if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
- dict->currentOffset += 64 KB;
- base = p - dict->currentOffset;
+ base = dictEnd - dict->currentOffset;
dict->dictionary = p;
dict->dictSize = (U32)(dictEnd - p);
- dict->currentOffset += dict->dictSize;
+ dict->tableType = tableType;
while (p <= dictEnd-HASH_UNIT) {
- LZ4_putPosition(p, dict->hashTable, byU32, base);
+ LZ4_putPosition(p, dict->hashTable, tableType, base);
p+=3;
}
- return dict->dictSize;
+ return (int)dict->dictSize;
}
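
For context on how a dictionary set up by LZ4_loadDict() is typically consumed, a hedged sketch (not a hunk of this diff; compress_with_dict() is a hypothetical wrapper):

    /* Compress `src` against a preset dictionary; dictBuf must stay readable
     * for the duration of the compression call. */
    static int compress_with_dict(const char *dictBuf, int dictLen,
                                  const char *src, char *dst,
                                  int srcSize, int dstCapacity)
    {
        LZ4_stream_t ctx;
        LZ4_initStream(&ctx, sizeof(ctx));
        (void)LZ4_loadDict(&ctx, dictBuf, dictLen);   /* only the last 64 KB are kept */
        return LZ4_compress_fast_continue(&ctx, src, dst, srcSize, dstCapacity, 1);
    }
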
+void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) {
+ const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
+ &(dictionaryStream->internal_donotuse);
+
+ DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
+ workingStream, dictionaryStream,
+ dictCtx != NULL ? dictCtx->dictSize : 0);
+
+ /* Calling LZ4_resetStream_fast() here makes sure that changes will not be
+ * erased by subsequent calls to LZ4_resetStream_fast() in case stream was
+ * marked as having dirty context, e.g. requiring full reset.
+ */
+ LZ4_resetStream_fast(workingStream);
+
+ if (dictCtx != NULL) {
+ /* If the current offset is zero, we will never look in the
+ * external dictionary context, since there is no value a table
+ * entry can take that indicates a miss. In that case, we need
+ * to bump the offset to something non-zero.
+ */
+ if (workingStream->internal_donotuse.currentOffset == 0) {
+ workingStream->internal_donotuse.currentOffset = 64 KB;
+ }
-static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
+ /* Don't actually attach an empty dictionary.
+ */
+ if (dictCtx->dictSize == 0) {
+ dictCtx = NULL;
+ }
+ }
+ workingStream->internal_donotuse.dictCtx = dictCtx;
+}
+
+
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
{
- if ((LZ4_dict->currentOffset > 0x80000000) ||
- ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) { /* address space overflow */
+ assert(nextSize >= 0);
+ if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
/* rescale hash table */
U32 const delta = LZ4_dict->currentOffset - 64 KB;
const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
int i;
+ DEBUGLOG(4, "LZ4_renormDictT");
for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
else LZ4_dict->hashTable[i] -= delta;
@@ -994,17 +1492,30 @@ static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
}
-int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
+ const char* source, char* dest,
+ int inputSize, int maxOutputSize,
+ int acceleration)
{
+ const tableType_t tableType = byU32;
LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
- const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+ const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;
- const BYTE* smallest = (const BYTE*) source;
- if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */
- if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
- LZ4_renormDictT(streamPtr, smallest);
+ DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
+
+ if (streamPtr->dirty) { return 0; } /* Uninitialized structure detected */
+ LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */
if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+ /* invalidate tiny dictionaries */
+ if ( (streamPtr->dictSize-1 < 4-1) /* intentional underflow */
+ && (dictEnd != (const BYTE*)source) ) {
+ DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary = (const BYTE*)source;
+ dictEnd = (const BYTE*)source;
+ }
+
/* Check overlapping input/dictionary space */
{ const BYTE* sourceEnd = (const BYTE*) source + inputSize;
if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
@@ -1017,46 +1528,61 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch
/* prefix mode : source data follows dictionary */
if (dictEnd == (const BYTE*)source) {
- int result;
if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
else
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
- streamPtr->dictSize += (U32)inputSize;
- streamPtr->currentOffset += (U32)inputSize;
- return result;
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
}
/* external dictionary mode */
{ int result;
- if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
- else
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
+ if (streamPtr->dictCtx) {
+ /* We depend here on the fact that dictCtx'es (produced by
+ * LZ4_loadDict) guarantee that their tables contain no references
+ * to offsets between dictCtx->currentOffset - 64 KB and
+ * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
+ * to use noDictIssue even when the dict isn't a full 64 KB.
+ */
+ if (inputSize > 4 KB) {
+ /* For compressing large blobs, it is faster to pay the setup
+ * cost to copy the dictionary's tables into the active context,
+ * so that the compression loop is only looking into one table.
+ */
+ memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
+ }
+ } else {
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+ }
+ }
streamPtr->dictionary = (const BYTE*)source;
streamPtr->dictSize = (U32)inputSize;
- streamPtr->currentOffset += (U32)inputSize;
return result;
}
}
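
A double-buffer sketch of the streaming compression loop this function serves (illustrative only; read_block()/write_block() are hypothetical I/O helpers, and the 64 KB buffer size is an arbitrary choice):

    static char inBuf[2][64 * 1024];
    char out[LZ4_COMPRESSBOUND(64 * 1024)];
    LZ4_stream_t ctx;
    int idx = 0;

    LZ4_initStream(&ctx, sizeof(ctx));
    for (;;) {
        int n = read_block(inBuf[idx], (int)sizeof(inBuf[idx]));  /* hypothetical source */
        if (n <= 0)
            break;
        int c = LZ4_compress_fast_continue(&ctx, inBuf[idx], out, n, (int)sizeof(out), 1);
        if (c <= 0)
            break;
        write_block(out, c);   /* hypothetical sink */
        idx ^= 1;              /* previous block stays addressable and acts as the dictionary */
    }
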
-/* Hidden debug function, to force external dictionary mode */
-int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
+/* Hidden debug function, to force-test external dictionary mode */
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
{
LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
int result;
- const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
- const BYTE* smallest = dictEnd;
- if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
- LZ4_renormDictT(streamPtr, smallest);
+ LZ4_renormDictT(streamPtr, srcSize);
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+ }
streamPtr->dictionary = (const BYTE*)source;
- streamPtr->dictSize = (U32)inputSize;
- streamPtr->currentOffset += (U32)inputSize;
+ streamPtr->dictSize = (U32)srcSize;
return result;
}
@@ -1074,8 +1600,8 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
- if ((U32)dictSize > 64 KB) dictSize = 64 KB; /* useless to define a dictionary > 64 KB */
- if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;
+ if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
+ if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
@@ -1087,218 +1613,587 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
-/*-*****************************
-* Decompression functions
-*******************************/
+/*-*******************************
+ * Decompression functions
+ ********************************/
+
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#undef MIN
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+
+/* Read the variable-length literal or match length.
+ *
+ * ip - pointer to use as input.
+ * lencheck - end ip. Return an error if ip advances >= lencheck.
+ * loop_check - check ip >= lencheck in body of loop. Returns loop_error if so.
+ * initial_check - check ip >= lencheck before start of loop. Returns initial_error if so.
+ * error (output) - error code. Should be set to 0 before call.
+ */
+typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
+LZ4_FORCE_INLINE unsigned
+read_variable_length(const BYTE**ip, const BYTE* lencheck, int loop_check, int initial_check, variable_length_error* error)
+{
+ unsigned length = 0;
+ unsigned s;
+ if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
+ *error = initial_error;
+ return length;
+ }
+ do {
+ s = **ip;
+ (*ip)++;
+ length += s;
+ if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
+ *error = loop_error;
+ return length;
+ }
+ } while (s==255);
+
+ return length;
+}
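
For readers new to the block format: the scheme decoded above is the usual LZ4 varint-style length, where a 4-bit field saturates at 15 and each subsequent 255-valued byte extends the length until a byte below 255 terminates it. A hedged sketch of the matching encoder side (write_length() is a hypothetical helper mirroring what LZ4_compress_generic() emits):

    /* Encode (length - RUN_MASK/ML_MASK) after a saturated 4-bit field.
     * Example: 300 literals -> nibble 15, then bytes 255 and 30 (15+255+30 == 300). */
    static unsigned char *write_length(unsigned char *op, unsigned remainder)
    {
        while (remainder >= 255) {
            *op++ = 255;
            remainder -= 255;
        }
        *op++ = (unsigned char)remainder;
        return op;
    }
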
+
/*! LZ4_decompress_generic() :
- * This generic decompression function cover all use cases.
- * It shall be instantiated several times, using different sets of directives
- * Note that it is important this generic function is really inlined,
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important for performance that this function really get inlined,
* in order to remove useless branches during compilation optimization.
*/
-FORCE_INLINE int LZ4_decompress_generic(
- const char* const source,
- char* const dest,
- int inputSize,
- int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
-
- int endOnInput, /* endOnOutputSize, endOnInputSize */
- int partialDecoding, /* full, partial */
- int targetOutputSize, /* only used if partialDecoding==partial */
- int dict, /* noDict, withPrefix64k, usingExtDict */
- const BYTE* const lowPrefix, /* == dest when no prefix */
+LZ4_FORCE_INLINE int
+LZ4_decompress_generic(
+ const char* const src,
+ char* const dst,
+ int srcSize,
+ int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
+
+ endCondition_directive endOnInput, /* endOnOutputSize, endOnInputSize */
+ earlyEnd_directive partialDecoding, /* full, partial */
+ dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
+ const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
const BYTE* const dictStart, /* only if dict==usingExtDict */
const size_t dictSize /* note : = 0 if noDict */
)
{
- /* Local Variables */
- const BYTE* ip = (const BYTE*) source;
- const BYTE* const iend = ip + inputSize;
+ if (src == NULL) { return -1; }
- BYTE* op = (BYTE*) dest;
- BYTE* const oend = op + outputSize;
- BYTE* cpy;
- BYTE* oexit = op + targetOutputSize;
- const BYTE* const lowLimit = lowPrefix - dictSize;
+ { const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
- const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
- const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};
- const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
+ BYTE* op = (BYTE*) dst;
+ BYTE* const oend = op + outputSize;
+ BYTE* cpy;
- const int safeDecode = (endOnInput==endOnInputSize);
- const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
+ const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
+ const int safeDecode = (endOnInput==endOnInputSize);
+ const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
- /* Special cases */
- if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */
- if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */
- if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
- /* Main Loop : decode sequences */
- while (1) {
- size_t length;
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
+ const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+
const BYTE* match;
size_t offset;
+ unsigned token;
+ size_t length;
- /* get literal length */
- unsigned const token = *ip++;
- if ((length=(token>>ML_BITS)) == RUN_MASK) {
- unsigned s;
- do {
- s = *ip++;
- length += s;
- } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error; /* overflow detection */
- if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error; /* overflow detection */
+
+ DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
+
+ /* Special cases */
+ assert(lowPrefix <= op);
+ if ((endOnInput) && (unlikely(outputSize==0))) {
+ /* Empty output buffer */
+ if (partialDecoding) return 0;
+ return ((srcSize==1) && (*ip==0)) ? 0 : -1;
+ }
+ if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); }
+ if ((endOnInput) && unlikely(srcSize==0)) { return -1; }
+
+ /* Currently the fast loop shows a regression on qualcomm arm chips. */
+#if LZ4_FAST_DEC_LOOP
+ if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
+ DEBUGLOG(6, "skip fast decode loop");
+ goto safe_decode;
}
- /* copy literals */
- cpy = op+length;
- if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
- || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
- {
- if (partialDecoding) {
- if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */
- if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */
+ /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
+ assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
+ if (endOnInput) { assert(ip < iend); }
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend-RUN_MASK, endOnInput, endOnInput, &error);
+ if (error == initial_error) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+
+ /* copy literals */
+ cpy = op+length;
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if (endOnInput) { /* LZ4_decompress_safe() */
+ if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
+ LZ4_wildCopy32(op, ip, cpy);
+ } else { /* LZ4_decompress_fast() */
+ if (cpy>oend-8) { goto safe_literal_copy; }
+ LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
+ * it doesn't know input length, and only relies on end-of-block properties */
+ }
+ ip += length; op = cpy;
} else {
- if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */
- if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */
+ cpy = op+length;
+ if (endOnInput) { /* LZ4_decompress_safe() */
+ DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
+ /* We don't need to check oend, since we check it once for each loop below */
+ if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
+ /* Literals can only be 14, but hope compilers optimize if we copy by a register size */
+ memcpy(op, ip, 16);
+ } else { /* LZ4_decompress_fast() */
+ /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
+ * it doesn't know input length, and relies on end-of-block properties */
+ memcpy(op, ip, 8);
+ if (length > 8) { memcpy(op+8, ip+8, 8); }
+ }
+ ip += length; op = cpy;
}
- memcpy(op, ip, length);
- ip += length;
- op += length;
- break; /* Necessarily EOF, due to parsing restrictions */
- }
- LZ4_wildCopy(op, ip, cpy);
- ip += length; op = cpy;
-
- /* get offset */
- offset = LZ4_readLE16(ip); ip+=2;
- match = op - offset;
- if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error; /* Error : offset outside buffers */
- LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */
-
- /* get matchlength */
- length = token & ML_MASK;
- if (length == ML_MASK) {
- unsigned s;
- do {
- s = *ip++;
- if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
- length += s;
- } while (s==255);
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
+
+ /* get offset */
+ offset = LZ4_readLE16(ip); ip+=2;
+ match = op - offset;
+ assert(match <= op);
+
+ /* get matchlength */
+ length = token & ML_MASK;
+
+ if (length == ML_MASK) {
+ variable_length_error error = ok;
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error);
+ if (error != ok) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
+ length += MINMATCH;
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+ } else {
+ length += MINMATCH;
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+
+ /* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */
+ if ((dict == withPrefix64k) || (match >= lowPrefix)) {
+ if (offset >= 8) {
+ assert(match >= lowPrefix);
+ assert(match <= op);
+ assert(op + 18 <= oend);
+
+ memcpy(op, match, 8);
+ memcpy(op+8, match+8, 8);
+ memcpy(op+16, match+16, 2);
+ op += length;
+ continue;
+ } } }
+
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op+length > oend-LASTLITERALS)) {
+ if (partialDecoding) {
+ length = MIN(length, (size_t)(oend-op)); /* reach end of buffer */
+ } else {
+ goto _output_error; /* end-of-block condition violated */
+ } }
+
+ if (length <= (size_t)(lowPrefix-match)) {
+ /* match fits entirely within external dictionary : just copy */
+ memmove(op, dictEnd - (lowPrefix-match), length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+ memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
+ BYTE* const endOfMatch = op + restSize;
+ const BYTE* copyFrom = lowPrefix;
+ while (op < endOfMatch) { *op++ = *copyFrom++; }
+ } else {
+ memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ } }
+ continue;
+ }
+
+ /* copy match within block */
+ cpy = op + length;
+
+ assert((op <= oend) && (oend-op >= 32));
+ if (unlikely(offset<16)) {
+ LZ4_memcpy_using_offset(op, match, cpy, offset);
+ } else {
+ LZ4_wildCopy32(op, match, cpy);
+ }
+
+ op = cpy; /* wildcopy correction */
}
- length += MINMATCH;
+ safe_decode:
+#endif
+
+ /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
+
+ /* A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough space,
+ * enter the shortcut and copy 16 bytes on behalf of the literals
+ * (in the fast mode, only 8 bytes can be safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes in a similar
+ * manner; but we ensure that there's enough space in the output for
+ * those 18 bytes earlier, upon entering the shortcut (in other words,
+ * there is a combined check for both stages).
+ */
+ if ( (endOnInput ? length != RUN_MASK : length <= 8)
+ /* strictly "less than" on input, to re-enter the loop with at least one byte */
+ && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
+ /* Copy the literals */
+ memcpy(op, ip, endOnInput ? 16 : 8);
+ op += length; ip += length;
+
+ /* The second stage: prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted. */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip); ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ( (length != ML_MASK)
+ && (offset >= 8)
+ && (dict==withPrefix64k || match >= lowPrefix) ) {
+ /* Copy the match. */
+ memcpy(op + 0, match + 0, 8);
+ memcpy(op + 8, match + 8, 8);
+ memcpy(op +16, match +16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
+
+ /* The second stage didn't work out, but the info is ready.
+ * Propel it right to the point of match copying. */
+ goto _copy_match;
+ }
- /* check external dictionary */
- if ((dict==usingExtDict) && (match < lowPrefix)) {
- if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error; /* doesn't respect parsing restriction */
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend-RUN_MASK, endOnInput, endOnInput, &error);
+ if (error == initial_error) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+ }
- if (length <= (size_t)(lowPrefix-match)) {
- /* match can be copied as a single segment from external dictionary */
- memmove(op, dictEnd - (lowPrefix-match), length);
+ /* copy literals */
+ cpy = op+length;
+#if LZ4_FAST_DEC_LOOP
+ safe_literal_copy:
+#endif
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) )
+ || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
+ {
+ /* We've either hit the input parsing restriction or the output parsing restriction.
+ * If we've hit the input parsing condition then this must be the last sequence.
+ * If we've hit the output parsing condition then we are either using partialDecoding
+ * or this must be the last sequence (otherwise it is an error).
+ */
+ if (partialDecoding) {
+ /* Since we are partial decoding we may be in this block because of the output parsing
+ * restriction, which is not valid since the output buffer is allowed to be undersized.
+ */
+ assert(endOnInput);
+ /* If we're in this block because of the input parsing condition, then we must be on the
+ * last sequence (or invalid), so we must check that we exactly consume the input.
+ */
+ if ((ip+length>iend-(2+1+LASTLITERALS)) && (ip+length != iend)) { goto _output_error; }
+ assert(ip+length <= iend);
+ /* We are finishing in the middle of a literals segment.
+ * Break after the copy.
+ */
+ if (cpy > oend) {
+ cpy = oend;
+ assert(op<=oend);
+ length = (size_t)(oend-op);
+ }
+ assert(ip+length <= iend);
+ } else {
+ /* We must be on the last sequence because of the parsing limitations so check
+ * that we exactly regenerate the original size (must be exact when !endOnInput).
+ */
+ if ((!endOnInput) && (cpy != oend)) { goto _output_error; }
+ /* We must be on the last sequence (or invalid) because of the parsing limitations
+ * so check that we exactly consume the input and don't overrun the output buffer.
+ */
+ if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) { goto _output_error; }
+ }
+ memmove(op, ip, length); /* supports overlapping memory regions, which only matters for in-place decompression scenarios */
+ ip += length;
op += length;
+ /* Necessarily EOF when !partialDecoding. When partialDecoding
+ * it is EOF if we've either filled the output buffer or hit
+ * the input parsing restriction.
+ */
+ if (!partialDecoding || (cpy == oend) || (ip == iend)) {
+ break;
+ }
} else {
- /* match encompass external dictionary and current block */
- size_t const copySize = (size_t)(lowPrefix-match);
- size_t const restSize = length - copySize;
- memcpy(op, dictEnd - copySize, copySize);
- op += copySize;
- if (restSize > (size_t)(op-lowPrefix)) { /* overlap copy */
- BYTE* const endOfMatch = op + restSize;
- const BYTE* copyFrom = lowPrefix;
- while (op < endOfMatch) *op++ = *copyFrom++;
+ LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */
+ ip += length; op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip); ip+=2;
+ match = op - offset;
+
+ /* get matchlength */
+ length = token & ML_MASK;
+
+ _copy_match:
+ if (length == ML_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error);
+ if (error != ok) goto _output_error;
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
+ }
+ length += MINMATCH;
+
+#if LZ4_FAST_DEC_LOOP
+ safe_match_copy:
+#endif
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op+length > oend-LASTLITERALS)) {
+ if (partialDecoding) length = MIN(length, (size_t)(oend-op));
+ else goto _output_error; /* doesn't respect parsing restriction */
+ }
+
+ if (length <= (size_t)(lowPrefix-match)) {
+ /* match fits entirely within external dictionary : just copy */
+ memmove(op, dictEnd - (lowPrefix-match), length);
+ op += length;
} else {
- memcpy(op, lowPrefix, restSize);
- op += restSize;
- } }
- continue;
- }
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+ memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
+ BYTE* const endOfMatch = op + restSize;
+ const BYTE* copyFrom = lowPrefix;
+ while (op < endOfMatch) *op++ = *copyFrom++;
+ } else {
+ memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ } }
+ continue;
+ }
+ assert(match >= lowPrefix);
+
+ /* copy match within block */
+ cpy = op + length;
+
+ /* partialDecoding : may end anywhere within the block */
+ assert(op<=oend);
+ if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen = MIN(length, (size_t)(oend-op));
+ const BYTE* const matchEnd = match + mlen;
+ BYTE* const copyEnd = op + mlen;
+ if (matchEnd > op) { /* overlap copy */
+ while (op < copyEnd) { *op++ = *match++; }
+ } else {
+ memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend) { break; }
+ continue;
+ }
- /* copy match within block */
- cpy = op + length;
- if (unlikely(offset<8)) {
- const int dec64 = dec64table[offset];
- op[0] = match[0];
- op[1] = match[1];
- op[2] = match[2];
- op[3] = match[3];
- match += dec32table[offset];
- memcpy(op+4, match, 4);
- match -= dec64;
- } else { LZ4_copy8(op, match); match+=8; }
- op += 8;
-
- if (unlikely(cpy>oend-12)) {
- BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
- if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
- if (op < oCopyLimit) {
- LZ4_wildCopy(op, match, oCopyLimit);
- match += oCopyLimit - op;
- op = oCopyLimit;
+ if (unlikely(offset<8)) {
+ LZ4_write32(op, 0); /* silence msan warning when offset==0 */
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += inc32table[offset];
+ memcpy(op+4, match, 4);
+ match -= dec64table[offset];
+ } else {
+ memcpy(op, match, 8);
+ match += 8;
}
- while (op<cpy) *op++ = *match++;
- } else {
- LZ4_copy8(op, match);
- if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
+ op += 8;
+
+ if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+ BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
+ if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
+ if (op < oCopyLimit) {
+ LZ4_wildCopy8(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+ while (op < cpy) { *op++ = *match++; }
+ } else {
+ memcpy(op, match, 8);
+ if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
+ }
+ op = cpy; /* wildcopy correction */
}
- op=cpy; /* correction */
- }
- /* end of decoding */
- if (endOnInput)
- return (int) (((char*)op)-dest); /* Nb of output bytes decoded */
- else
- return (int) (((const char*)ip)-source); /* Nb of input bytes read */
+ /* end of decoding */
+ if (endOnInput) {
+ return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
+ } else {
+ return (int) (((const char*)ip)-src); /* Nb of input bytes read */
+ }
- /* Overflow error detected */
-_output_error:
- return (int) (-(((const char*)ip)-source))-1;
+ /* Overflow error detected */
+ _output_error:
+ return (int) (-(((const char*)ip)-src))-1;
+ }
}
+/*===== Instantiate the API decoding functions. =====*/
+
+LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
{
- return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
+ return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
+ endOnInputSize, decode_full_block, noDict,
+ (BYTE*)dest, NULL, 0);
}
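
A compress/decompress round trip through the safe entry point above, as a sketch rather than anything from this patch; the 4 KB buffer size is an arbitrary example.

    /* Round-trip one 4 KB buffer. */
    static int round_trip(const char src[4096])
    {
        char compressed[LZ4_COMPRESSBOUND(4096)];
        char regen[4096];
        int c = LZ4_compress_default(src, compressed, 4096, (int)sizeof(compressed));
        if (c <= 0)
            return -1;   /* compression failed */
        int d = LZ4_decompress_safe(compressed, regen, c, (int)sizeof(regen));
        if (d < 0)
            return -1;   /* malformed or truncated input */
        return d;        /* == 4096 on success; regen matches src */
    }
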
-int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
+LZ4_FORCE_O2_GCC_PPC64LE
+int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
{
- return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ endOnInputSize, partial_decode,
+ noDict, (BYTE*)dst, NULL, 0);
}
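
A sketch of the partial-decode entry point, e.g. to inspect only the start of a block without paying for a full decode (illustrative; `compressed`/`compressedSize` are assumed to come from the caller and the 256-byte peek size is arbitrary):

    char peek[256];
    int got = LZ4_decompress_safe_partial(compressed, peek, compressedSize,
                                          (int)sizeof(peek), (int)sizeof(peek));
    if (got < 0) { /* invalid input */ }
    /* got bytes were produced, at most sizeof(peek) */
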
+LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
{
- return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
}
+/*===== Instantiate a few more decoding cases, used more than once. =====*/
-/*===== streaming decompression functions =====*/
+LZ4_FORCE_O2_GCC_PPC64LE /* Exported, an obsolete API function. */
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
+}
-/*
- * If you prefer dynamic allocation methods,
- * LZ4_createStreamDecode()
- * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.
+/* Another obsolete API function, paired with the previous one. */
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+{
+ /* LZ4_decompress_fast doesn't validate match offsets,
+ * and thus serves well with any prefixed dictionary. */
+ return LZ4_decompress_fast(source, dest, originalSize);
+}
+
+LZ4_FORCE_O2_GCC_PPC64LE
+static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, noDict,
+ (BYTE*)dest-prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2_GCC_PPC64LE
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_O2_GCC_PPC64LE
+static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
+ const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
*/
+LZ4_FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_INLINE
+int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+}
+
+/*===== streaming decompression functions =====*/
+
LZ4_streamDecode_t* LZ4_createStreamDecode(void)
{
- LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
+ LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
+ LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */
return lz4s;
}
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
+ if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
FREEMEM(LZ4_stream);
return 0;
}
-/*!
- * LZ4_setStreamDecode() :
- * Use this function to instruct where to find the dictionary.
- * This function is not necessary if previous data is still available where it was decoded.
- * Loading a size of 0 is allowed (same effect as no dictionary).
- * Return : 1 if OK, 0 if error
+/*! LZ4_setStreamDecode() :
+ * Use this function to instruct where to find the dictionary.
+ * This function is not necessary if previous data is still available where it was decoded.
+ * Loading a size of 0 is allowed (same effect as no dictionary).
+ * @return : 1 if OK, 0 if error
*/
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
@@ -1310,6 +2205,25 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti
return 1;
}
+/*! LZ4_decoderRingBufferSize() :
+ * when setting a ring buffer for streaming decompression (optional scenario),
+ * provides the minimum size of this ring buffer
+ * to be compatible with any source respecting maxBlockSize condition.
+ * Note : in a ring buffer scenario,
+ * blocks are presumed decompressed next to each other.
+ * When not enough space remains for next block (remainingSize < maxBlockSize),
+ * decoding resumes from beginning of ring buffer.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+int LZ4_decoderRingBufferSize(int maxBlockSize)
+{
+ if (maxBlockSize < 0) return 0;
+ if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
+ if (maxBlockSize < 16) maxBlockSize = 16;
+ return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
+}
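
A sizing sketch for the ring-buffer scenario described above (illustrative; the 16 KB maxBlockSize is an arbitrary example, and the static variant uses the LZ4_DECODER_RING_BUFFER_SIZE() macro):

    #include <stdlib.h>

    int ringSize = LZ4_decoderRingBufferSize(16 * 1024);
    if (ringSize == 0) { /* invalid maxBlockSize */ }
    char *ring = malloc((size_t)ringSize);
    /* or, with a compile-time block size:
     * static char ring[LZ4_DECODER_RING_BUFFER_SIZE(16 * 1024)]; */
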
+
/*
*_continue() :
These decoding functions allow decompression of multiple blocks in "streaming" mode.
@@ -1317,52 +2231,75 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti
If it's not possible, save the relevant part of decoded data into a safe buffer,
and indicate where it stands using LZ4_setStreamDecode()
*/
+LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE*)dest) {
- result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0) return result;
- lz4sd->prefixSize += result;
+ lz4sd->prefixSize += (size_t)result;
lz4sd->prefixEnd += result;
} else {
+ /* The buffer wraps around, or they're switching to another buffer. */
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0) return result;
- lz4sd->prefixSize = result;
+ lz4sd->prefixSize = (size_t)result;
lz4sd->prefixEnd = (BYTE*)dest + result;
}
return result;
}
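
The decoding counterpart of the double-buffer compression loop, using the continue API above (a sketch; read_compressed_block()/consume() are hypothetical I/O helpers):

    static char decBuf[2][64 * 1024];
    char in[LZ4_COMPRESSBOUND(64 * 1024)];
    LZ4_streamDecode_t *sd = LZ4_createStreamDecode();
    int idx = 0;

    LZ4_setStreamDecode(sd, NULL, 0);          /* start without a dictionary */
    for (;;) {
        int c = read_compressed_block(in, (int)sizeof(in));   /* hypothetical source */
        if (c <= 0)
            break;
        int n = LZ4_decompress_safe_continue(sd, in, decBuf[idx], c, (int)sizeof(decBuf[idx]));
        if (n < 0)
            break;
        consume(decBuf[idx], n);               /* hypothetical sink */
        idx ^= 1;                              /* previous output stays valid as the dictionary */
    }
    LZ4_freeStreamDecode(sd);
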
+LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
{
LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
+ assert(originalSize >= 0);
- if (lz4sd->prefixEnd == (BYTE*)dest) {
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (lz4sd->prefixSize == 0) {
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)originalSize;
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0)
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ else
+ result = LZ4_decompress_fast_doubleDict(source, dest, originalSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0) return result;
- lz4sd->prefixSize += originalSize;
+ lz4sd->prefixSize += (size_t)originalSize;
lz4sd->prefixEnd += originalSize;
} else {
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_fast_extDict(source, dest, originalSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0) return result;
- lz4sd->prefixSize = originalSize;
+ lz4sd->prefixSize = (size_t)originalSize;
lz4sd->prefixEnd = (BYTE*)dest + originalSize;
}
@@ -1377,32 +2314,27 @@ Advanced decoding functions :
the dictionary must be explicitly provided within parameters
*/
-FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
+int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
if (dictSize==0)
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
+ return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
if (dictStart+dictSize == dest) {
- if (dictSize >= (int)(64 KB - 1))
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
+ if (dictSize >= 64 KB - 1) {
+ return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
}
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
-}
-
-int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
-{
- return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
}
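
For symmetry with LZ4_loadDict()-based compression, a one-call sketch of dictionary decompression (illustrative; `compressed`, `out`, and dictBuf/dictLen are assumed, and the dictionary must be the same bytes used on the compression side):

    int n = LZ4_decompress_safe_usingDict(compressed, out, compressedSize,
                                          (int)sizeof(out), dictBuf, dictLen);
    if (n < 0) { /* corrupt input or mismatched dictionary */ }
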
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
- return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
-}
-
-/* debug function */
-int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
-{
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+ if (dictSize==0 || dictStart+dictSize == dest)
+ return LZ4_decompress_fast(source, dest, originalSize);
+ assert(dictSize >= 0);
+ return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
}
@@ -1410,64 +2342,67 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compres
* Obsolete Functions
***************************************************/
/* obsolete compression functions */
-int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
-int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
-int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
-int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
-int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
-int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+ return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
+}
+int LZ4_compress(const char* src, char* dest, int srcSize)
+{
+ return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
+}
+int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
+{
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
+}
+int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
+{
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
+}
+int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
+{
+ return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
+}
+int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
+{
+ return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
+}
/*
-These function names are deprecated and should no longer be used.
+These decompression functions are deprecated and should no longer be used.
They are only provided here for compatibility with older user programs.
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
*/
-int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
-int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
-
+int LZ4_uncompress (const char* source, char* dest, int outputSize)
+{
+ return LZ4_decompress_fast(source, dest, outputSize);
+}
+int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
+{
+ return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
+}
/* Obsolete Streaming functions */
int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
-static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base)
-{
- MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t));
- lz4ds->internal_donotuse.bufferStart = base;
-}
-
int LZ4_resetStreamState(void* state, char* inputBuffer)
{
- if ((((uptrval)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
- LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
+ (void)inputBuffer;
+ LZ4_resetStream((LZ4_stream_t*)state);
return 0;
}
void* LZ4_create (char* inputBuffer)
{
- LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t));
- LZ4_init (lz4ds, (BYTE*)inputBuffer);
- return lz4ds;
+ (void)inputBuffer;
+ return LZ4_createStream();
}
-char* LZ4_slideInputBuffer (void* LZ4_Data)
-{
- LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
- int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
- return (char*)(ctx->bufferStart + dictSize);
-}
-
-/* Obsolete streaming decompression functions */
-
-int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
-{
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
-}
-
-int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+char* LZ4_slideInputBuffer (void* state)
{
- return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
+ /* avoid const char * -> char * conversion warning */
+ return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
}
#endif /* LZ4_COMMONDEFS_ONLY */
diff --git a/src/compat/compat-lz4.h b/src/compat/compat-lz4.h
index 0aae19c..32108e2 100644
--- a/src/compat/compat-lz4.h
+++ b/src/compat/compat-lz4.h
@@ -1,7 +1,7 @@
/*
* LZ4 - Fast LZ compression algorithm
* Header File
- * Copyright (C) 2011-2016, Yann Collet.
+ * Copyright (C) 2011-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -32,13 +32,13 @@
- LZ4 homepage : http://www.lz4.org
- LZ4 source repository : https://github.com/lz4/lz4
*/
-#ifndef LZ4_H_2983827168210
-#define LZ4_H_2983827168210
-
#if defined (__cplusplus)
extern "C" {
#endif
+#ifndef LZ4_H_2983827168210
+#define LZ4_H_2983827168210
+
/* --- Dependency --- */
#include <stddef.h> /* size_t */
@@ -46,24 +46,31 @@ extern "C" {
/**
Introduction
- LZ4 is lossless compression algorithm, providing compression speed at 400 MB/s per core,
+ LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core,
scalable with multi-cores CPU. It features an extremely fast decoder, with speed in
multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
The LZ4 compression library provides in-memory compression and decompression functions.
+ It gives full buffer control to user.
Compression can be done in:
- a single step (described as Simple Functions)
- a single step, reusing a context (described in Advanced Functions)
- unbounded multiple steps (described as Streaming compression)
- lz4.h provides block compression functions. It gives full buffer control to user.
- Decompressing an lz4-compressed block also requires metadata (such as compressed size).
- Each application is free to encode such metadata in whichever way it wants.
+ lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
+ Decompressing such a compressed block requires additional metadata.
+ The exact metadata depends on the exact decompression function.
+ For the typical case of LZ4_decompress_safe(),
+ metadata includes the block's compressed size, and a maximum bound on the decompressed size.
+ Each application is free to encode and pass such metadata in whichever way it wants.
+
+ lz4.h only handles blocks; it cannot generate Frames.
- An additional format, called LZ4 frame specification (doc/lz4_Frame_format.md),
- take care of encoding standard metadata alongside LZ4-compressed blocks.
- If your application requires interoperability, it's recommended to use it.
- A library is provided to take care of it, see lz4frame.h.
+ Blocks are different from Frames (doc/lz4_Frame_format.md).
+ Frames bundle both blocks and metadata in a specified manner.
+ Embedding metadata is required for compressed data to be self-contained and portable.
+ Frame format is delivered through a companion API, declared in lz4frame.h.
+ The `lz4` CLI can only manage frames.
*/
/*^***************************************************************
@@ -72,20 +79,28 @@ extern "C" {
/*
* LZ4_DLL_EXPORT :
* Enable exporting of functions when building a Windows DLL
+* LZ4LIB_VISIBILITY :
+* Control library symbols visibility.
*/
+#ifndef LZ4LIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define LZ4LIB_VISIBILITY
+# endif
+#endif
#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
-# define LZ4LIB_API __declspec(dllexport)
+# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY
#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
-# define LZ4LIB_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
-# define LZ4LIB_API
+# define LZ4LIB_API LZ4LIB_VISIBILITY
#endif
-
-/*========== Version =========== */
+/*------ Version ------*/
#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
-#define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface capabilities */
-#define LZ4_VERSION_RELEASE 5 /* for tweaks, bug-fixes, or development */
+#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */
+#define LZ4_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */
#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
@@ -94,8 +109,8 @@ extern "C" {
#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION)
-LZ4LIB_API int LZ4_versionNumber (void);
-LZ4LIB_API const char* LZ4_versionString (void);
+LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version */
+LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version */
/*-************************************
@@ -104,41 +119,49 @@ LZ4LIB_API const char* LZ4_versionString (void);
/*!
* LZ4_MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
- * Increasing memory usage improves compression ratio
- * Reduced memory usage can improve speed, due to cache effect
+ * Increasing memory usage improves compression ratio.
+ * Reduced memory usage may improve speed, thanks to better cache locality.
* Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
*/
-#define LZ4_MEMORY_USAGE 14
+#ifndef LZ4_MEMORY_USAGE
+# define LZ4_MEMORY_USAGE 14
+#endif
/*-************************************
* Simple Functions
**************************************/
/*! LZ4_compress_default() :
- Compresses 'sourceSize' bytes from buffer 'source'
- into already allocated 'dest' buffer of size 'maxDestSize'.
- Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize).
- It also runs faster, so it's a recommended setting.
- If the function cannot compress 'source' into a more limited 'dest' budget,
- compression stops *immediately*, and the function result is zero.
- As a consequence, 'dest' content is not valid.
- This function never writes outside 'dest' buffer, nor read outside 'source' buffer.
- sourceSize : Max supported value is LZ4_MAX_INPUT_VALUE
- maxDestSize : full or partial size of buffer 'dest' (which must be already allocated)
- return : the number of bytes written into buffer 'dest' (necessarily <= maxOutputSize)
- or 0 if compression fails */
-LZ4LIB_API int LZ4_compress_default(const char* source, char* dest, int sourceSize, int maxDestSize);
+ * Compresses 'srcSize' bytes from buffer 'src'
+ * into already allocated 'dst' buffer of size 'dstCapacity'.
+ * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
+ * It also runs faster, so it's a recommended setting.
+ * If the function cannot compress 'src' into a more limited 'dst' budget,
+ * compression stops *immediately*, and the function result is zero.
+ * In which case, 'dst' content is undefined (invalid).
+ * srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
+ * dstCapacity : size of buffer 'dst' (which must be already allocated)
+ * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
+ * or 0 if compression fails
+ * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer).
+ */
+LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);
/*! LZ4_decompress_safe() :
- compressedSize : is the precise full size of the compressed block.
- maxDecompressedSize : is the size of destination buffer, which must be already allocated.
- return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize)
- If destination buffer is not large enough, decoding will stop and output an error code (<0).
- If the source stream is detected malformed, the function will stop decoding and return a negative result.
- This function is protected against buffer overflow exploits, including malicious data packets.
- It never writes outside output buffer, nor reads outside input buffer.
-*/
-LZ4LIB_API int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize);
+ * compressedSize : is the exact complete size of the compressed block.
+ * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size.
+ * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
+ * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
+ * If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ * Note 1 : This function is protected against malicious data packets :
+ * it will never write outside the 'dst' buffer, nor read outside the 'source' buffer,
+ * even if the compressed block is maliciously modified to order the decoder to do these actions.
+ * In such case, the decoder stops immediately, and considers the compressed block malformed.
+ * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them.
+ * The implementation is free to send / store / derive this information in whichever way is most beneficial.
+ * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
+ */
+LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);
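/* Usage sketch (illustrative only, assuming the backported header is included
 * as "compat-lz4.h"): a minimal round trip through LZ4_compress_default() and
 * LZ4_decompress_safe(), with the compressed size carried out-of-band as the
 * block format requires. */
#include <string.h>
#include "compat-lz4.h"

static int
lz4_roundtrip_example(void)
{
    const char src[] = "hello hello hello hello hello";
    const int srcSize = (int)sizeof(src);
    char compressed[LZ4_COMPRESSBOUND(sizeof(src))];
    char restored[sizeof(src)];

    const int cSize = LZ4_compress_default(src, compressed, srcSize,
                                           (int)sizeof(compressed));
    if (cSize <= 0)     /* 0 means the data did not fit into 'compressed' */
    {
        return -1;
    }

    /* compressedSize must be the exact value returned by the compressor */
    const int dSize = LZ4_decompress_safe(compressed, restored, cSize, srcSize);
    return (dSize == srcSize && memcmp(src, restored, (size_t)srcSize) == 0) ? 0 : -1;
}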
/*-************************************
@@ -147,184 +170,389 @@ LZ4LIB_API int LZ4_decompress_safe (const char* source, char* dest, int compress
#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
-/*!
-LZ4_compressBound() :
+/*! LZ4_compressBound() :
Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
This function is primarily useful for memory allocation purposes (destination buffer size).
Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
- Note that LZ4_compress_default() compress faster when dest buffer size is >= LZ4_compressBound(srcSize)
+ Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize)
inputSize : max supported value is LZ4_MAX_INPUT_SIZE
return : maximum output size in a "worst case" scenario
- or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE)
+ or 0, if input size is incorrect (too large or negative)
*/
LZ4LIB_API int LZ4_compressBound(int inputSize);
-/*!
-LZ4_compress_fast() :
- Same as LZ4_compress_default(), but allows to select an "acceleration" factor.
+/*! LZ4_compress_fast() :
+ Same as LZ4_compress_default(), but allows selection of "acceleration" factor.
The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
An acceleration value of "1" is the same as regular LZ4_compress_default()
- Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1.
+ Values <= 0 will be replaced by ACCELERATION_DEFAULT (currently == 1, see lz4.c).
*/
-LZ4LIB_API int LZ4_compress_fast (const char* source, char* dest, int sourceSize, int maxDestSize, int acceleration);
+LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
-/*!
-LZ4_compress_fast_extState() :
- Same compression function, just using an externally allocated memory space to store compression state.
- Use LZ4_sizeofState() to know how much memory must be allocated,
- and allocate it on 8-bytes boundaries (using malloc() typically).
- Then, provide it as 'void* state' to compression function.
-*/
+/*! LZ4_compress_fast_extState() :
+ * Same as LZ4_compress_fast(), using an externally allocated memory space for its state.
+ * Use LZ4_sizeofState() to know how much memory must be allocated,
+ * and allocate it on 8-bytes boundaries (using `malloc()` typically).
+ * Then, provide this buffer as `void* state` to compression function.
+ */
LZ4LIB_API int LZ4_sizeofState(void);
-LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* source, char* dest, int inputSize, int maxDestSize, int acceleration);
-
-
-/*!
-LZ4_compress_destSize() :
- Reverse the logic, by compressing as much data as possible from 'source' buffer
- into already allocated buffer 'dest' of size 'targetDestSize'.
- This function either compresses the entire 'source' content into 'dest' if it's large enough,
- or fill 'dest' buffer completely with as much data as possible from 'source'.
- *sourceSizePtr : will be modified to indicate how many bytes where read from 'source' to fill 'dest'.
- New value is necessarily <= old value.
- return : Nb bytes written into 'dest' (necessarily <= targetDestSize)
- or 0 if compression fails
-*/
-LZ4LIB_API int LZ4_compress_destSize (const char* source, char* dest, int* sourceSizePtr, int targetDestSize);
-
-
-/*!
-LZ4_decompress_fast() :
- originalSize : is the original and therefore uncompressed size
- return : the number of bytes read from the source buffer (in other words, the compressed size)
- If the source stream is detected malformed, the function will stop decoding and return a negative result.
- Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes.
- note : This function fully respect memory boundaries for properly formed compressed data.
- It is a bit faster than LZ4_decompress_safe().
- However, it does not provide any protection against intentionally modified data stream (malicious input).
- Use this function in trusted environment only (data to decode comes from a trusted source).
-*/
-LZ4LIB_API int LZ4_decompress_fast (const char* source, char* dest, int originalSize);
-
-/*!
-LZ4_decompress_safe_partial() :
- This function decompress a compressed block of size 'compressedSize' at position 'source'
- into destination buffer 'dest' of size 'maxDecompressedSize'.
- The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached,
- reducing decompression time.
- return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize)
- Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller.
- Always control how many bytes were decoded.
- If the source stream is detected malformed, the function will stop decoding and return a negative result.
- This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets
+LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
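/* Sketch (illustrative, not prescriptive): reusing an externally allocated
 * state with LZ4_compress_fast_extState(). LZ4_sizeofState() reports how much
 * memory the state needs; malloc() already returns suitably aligned memory. */
#include <stdlib.h>
#include "compat-lz4.h"

static int
compress_with_ext_state(const char *src, int srcSize, char *dst, int dstCapacity)
{
    void *state = malloc((size_t)LZ4_sizeofState());
    if (!state)
    {
        return -1;
    }
    /* acceleration 1 == same behaviour as LZ4_compress_default() */
    const int cSize = LZ4_compress_fast_extState(state, src, dst, srcSize,
                                                 dstCapacity, 1);
    free(state);
    return cSize;       /* 0 signals a compression failure */
}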
+
+
+/*! LZ4_compress_destSize() :
+ * Reverse the logic : compresses as much data as possible from 'src' buffer
+ * into already allocated buffer 'dst', of size >= 'targetDestSize'.
+ * This function either compresses the entire 'src' content into 'dst' if it's large enough,
+ * or fills 'dst' buffer completely with as much data as possible from 'src'.
+ * note: acceleration parameter is fixed to "default".
+ *
+ * *srcSizePtr : will be modified to indicate how many bytes were read from 'src' to fill 'dst'.
+ * New value is necessarily <= input value.
+ * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
+ * or 0 if compression fails.
*/
-LZ4LIB_API int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize);
+LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
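/* Sketch (illustrative): packing a source buffer into fixed-size output slots
 * with LZ4_compress_destSize(). Each slot is an independent block; *srcSizePtr
 * reports how much input was consumed so the caller can advance. The 512-byte
 * slot size is an arbitrary choice for the example. */
#include "compat-lz4.h"

static int
compress_into_slots(const char *src, int srcSize, char slots[][512], int maxSlots)
{
    int used = 0;
    while (srcSize > 0 && used < maxSlots)
    {
        int consumed = srcSize;     /* in: bytes available, out: bytes consumed */
        const int written = LZ4_compress_destSize(src, slots[used],
                                                  &consumed, 512);
        if (written <= 0 || consumed <= 0)
        {
            return -1;              /* compression failed */
        }
        src += consumed;
        srcSize -= consumed;
        used++;
    }
    return used;    /* number of slots produced */
}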
+
+
+/*! LZ4_decompress_safe_partial() :
+ * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
+ * into destination buffer 'dst' of size 'dstCapacity'.
+ * Up to 'targetOutputSize' bytes will be decoded.
+ * The function stops decoding on reaching this objective,
+ * which can boost performance when only the beginning of a block is required.
+ *
+ * @return : the number of bytes decoded in `dst` (necessarily <= dstCapacity)
+ * If source stream is detected malformed, function returns a negative result.
+ *
+ * Note : @return can be < targetOutputSize, if compressed block contains less data.
+ *
+ * Note 2 : this function features 2 parameters, targetOutputSize and dstCapacity,
+ * and expects targetOutputSize <= dstCapacity.
+ * It effectively stops decoding on reaching targetOutputSize,
+ * so dstCapacity is kind of redundant.
+ * This is because in a previous version of this function,
+ * decoding operation would not "break" a sequence in the middle.
+ * As a consequence, there was no guarantee that decoding would stop at exactly targetOutputSize,
+ * it could write more bytes, though only up to dstCapacity.
+ * Some "margin" used to be required for this operation to work properly.
+ * This is no longer necessary.
+ * The function nonetheless keeps its signature, in an effort to not break API.
+ */
+LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
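/* Sketch (illustrative): decoding only the beginning of a block with
 * LZ4_decompress_safe_partial(); targetOutputSize <= dstCapacity as required. */
#include "compat-lz4.h"

static int
peek_block_prefix(const char *block, int blockSize,
                  char *out, int outCapacity, int wanted)
{
    if (wanted > outCapacity)
    {
        wanted = outCapacity;
    }
    const int got = LZ4_decompress_safe_partial(block, out, blockSize,
                                                wanted, outCapacity);
    /* got may be < wanted if the block simply contains less data;
     * a negative value means the input was malformed */
    return got;
}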
/*-*********************************************
* Streaming Compression Functions
***********************************************/
-typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
+typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
-/*! LZ4_createStream() and LZ4_freeStream() :
- * LZ4_createStream() will allocate and initialize an `LZ4_stream_t` structure.
- * LZ4_freeStream() releases its memory.
- */
LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr);
-/*! LZ4_resetStream() :
- * An LZ4_stream_t structure can be allocated once and re-used multiple times.
- * Use this function to init an allocated `LZ4_stream_t` structure and start a new compression.
+/*! LZ4_resetStream_fast() : v1.9.0+
+ * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
+ * (e.g., LZ4_compress_fast_continue()).
+ *
+ * An LZ4_stream_t must be initialized once before usage.
+ * This is automatically done when created by LZ4_createStream().
+ * However, should the LZ4_stream_t be simply declared on stack (for example),
+ * it's necessary to initialize it first, using LZ4_initStream().
+ *
+ * After init, start any new stream with LZ4_resetStream_fast().
+ * The same LZ4_stream_t can be re-used multiple times consecutively
+ * and compress multiple streams,
+ * provided that it starts each new stream with LZ4_resetStream_fast().
+ *
+ * LZ4_resetStream_fast() is much faster than LZ4_initStream(),
+ * but is not compatible with memory regions containing garbage data.
+ *
+ * Note: it's only useful to call LZ4_resetStream_fast()
+ * in the context of streaming compression.
+ * The *extState* functions perform their own resets.
+ * Invoking LZ4_resetStream_fast() beforehand is redundant, and even counterproductive.
*/
-LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
+LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
/*! LZ4_loadDict() :
- * Use this function to load a static dictionary into LZ4_stream.
- * Any previous data will be forgotten, only 'dictionary' will remain in memory.
- * Loading a size of 0 is allowed.
- * Return : dictionary size, in bytes (necessarily <= 64 KB)
+ * Use this function to reference a static dictionary into an LZ4_stream_t.
+ * The dictionary must remain available during compression.
+ * LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
+ * The same dictionary will have to be loaded on the decompression side for successful decoding.
+ * Dictionaries are useful for better compression of small data (KB range).
+ * While LZ4 accepts any input as a dictionary,
+ * results are generally better when using Zstandard's Dictionary Builder.
+ * Loading a size of 0 is allowed, and is the same as a reset.
+ * @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
*/
LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
/*! LZ4_compress_fast_continue() :
- * Compress buffer content 'src', using data from previously compressed blocks as dictionary to improve compression ratio.
- * Important : Previous data blocks are assumed to still be present and unmodified !
- * 'dst' buffer must be already allocated.
- * If maxDstSize >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
- * If not, and if compressed data cannot fit into 'dst' buffer size, compression stops, and function returns a zero.
+ * Compress 'src' content using data from previously compressed blocks, for better compression ratio.
+ * 'dst' buffer must be already allocated.
+ * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
+ *
+ * @return : size of compressed block
+ * or 0 if there is an error (typically, cannot fit into 'dst').
+ *
+ * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block.
+ * Each block has precise boundaries.
+ * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata.
+ * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together.
+ *
+ * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory !
+ *
+ * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB.
+ * Make sure that the buffers are separated by at least one byte.
+ * This construction ensures that each block only depends on previous block.
+ *
+ * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
+ *
+ * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed.
*/
-LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int maxDstSize, int acceleration);
+LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
/*! LZ4_saveDict() :
- * If previously compressed data block is not guaranteed to remain available at its memory location,
+ * If the last 64KB of data cannot be guaranteed to remain available at its current memory location,
* save it into a safer place (char* safeBuffer).
- * Note : you don't need to call LZ4_loadDict() afterwards,
- * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
- * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
+ * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(),
+ * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
+ * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.
*/
-LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize);
+LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize);
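/* Sketch (illustrative): chained block compression with a reused input buffer.
 * Each chunk becomes its own block via LZ4_compress_fast_continue(); because
 * inBuf is overwritten between calls, the last 64KB of history is moved to a
 * safe place with LZ4_saveDict(). write_block() is a hypothetical sink for
 * however the application stores block size + payload. */
#include <stdio.h>
#include "compat-lz4.h"

#define CHUNK_SIZE 4096

extern void write_block(const char *data, int size);   /* hypothetical */

static int
compress_stream(FILE *in)
{
    LZ4_stream_t *ls = LZ4_createStream();
    char inBuf[CHUNK_SIZE];
    char dictBuf[64 * 1024];
    char outBuf[LZ4_COMPRESSBOUND(CHUNK_SIZE)];
    int ok = 1;

    if (!ls)
    {
        return 0;
    }
    for (;;)
    {
        const int n = (int)fread(inBuf, 1, sizeof(inBuf), in);
        if (n <= 0)
        {
            break;
        }
        const int cSize = LZ4_compress_fast_continue(ls, inBuf, outBuf, n,
                                                     (int)sizeof(outBuf), 1);
        if (cSize <= 0)
        {
            ok = 0;
            break;
        }
        write_block(outBuf, cSize);
        /* inBuf is about to be overwritten: preserve the history elsewhere */
        LZ4_saveDict(ls, dictBuf, (int)sizeof(dictBuf));
    }
    LZ4_freeStream(ls);
    return ok;
}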
/*-**********************************************
* Streaming Decompression Functions
* Bufferless synchronous API
************************************************/
-typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* incomplete type (defined later) */
+typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */
-/* creation / destruction of streaming decompression tracking structure */
+/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
+ * creation / destruction of streaming decompression tracking context.
+ * A tracking context can be re-used multiple times.
+ */
LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
/*! LZ4_setStreamDecode() :
- * Use this function to instruct where to find the dictionary.
- * Setting a size of 0 is allowed (same effect as reset).
- * @return : 1 if OK, 0 if error
+ * An LZ4_streamDecode_t context can be allocated once and re-used multiple times.
+ * Use this function to start decompression of a new stream of blocks.
+ * A dictionary can optionally be set. Use NULL or size 0 for a reset order.
+ * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression.
+ * @return : 1 if OK, 0 if error
*/
LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
-/*!
-LZ4_decompress_*_continue() :
- These decoding functions allow decompression of multiple blocks in "streaming" mode.
- Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB)
- In the case of a ring buffers, decoding buffer must be either :
- - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions)
- In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB).
- - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
- maxBlockSize is implementation dependent. It's the maximum size you intend to compress into a single block.
- In which case, encoding and decoding buffers do not need to be synchronized,
- and encoding ring buffer can have any size, including small ones ( < 64 KB).
- - _At least_ 64 KB + 8 bytes + maxBlockSize.
- In which case, encoding and decoding buffers do not need to be synchronized,
- and encoding ring buffer can have any size, including larger than decoding buffer.
- Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer,
- and indicate where it is saved using LZ4_setStreamDecode()
+/*! LZ4_decoderRingBufferSize() : v1.8.2+
+ * Note : in a ring buffer scenario (optional),
+ * blocks are presumed decompressed next to each other
+ * up to the moment there is not enough remaining space for the next block (remainingSize < maxBlockSize),
+ * at which stage it resumes from the beginning of the ring buffer.
+ * When setting such a ring buffer for streaming decompression,
+ * this function provides the minimum size of that ring buffer
+ * to be compatible with any source respecting the maxBlockSize condition.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
+#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */
+
+/*! LZ4_decompress_*_continue() :
+ * These decoding functions allow decompression of consecutive blocks in "streaming" mode.
+ * A block is an unsplittable entity, it must be presented entirely to a decompression function.
+ * Decompression functions only accept one block at a time.
+ * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded.
+ * If less than 64KB of data has been decoded, all the data must be present.
+ *
+ * Special : if decompression side sets a ring buffer, it must respect one of the following conditions :
+ * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize).
+ * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized.
+ * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize.
+ * - Synchronized mode :
+ * Decompression buffer size is _exactly_ the same as compression buffer size,
+ * and follows exactly same update rule (block boundaries at same positions),
+ * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream),
+ * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB).
+ * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized,
+ * and encoding ring buffer can have any size, including small ones ( < 64 KB).
+ *
+ * Whenever these conditions are not possible,
+ * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
+ * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block.
*/
-LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize);
-LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize);
+LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity);
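/* Sketch (illustrative): decoding a chain of blocks produced by
 * LZ4_compress_fast_continue(). Decoding into one contiguous buffer keeps the
 * previously decoded data in place, which satisfies the 64KB history rule.
 * read_block() is a hypothetical helper for however block boundaries are
 * transported; it returns the block size, or 0 when the stream ends. */
#include "compat-lz4.h"

extern int read_block(char *dst, int capacity);     /* hypothetical */

static int
decompress_stream(char *out, int outCapacity)
{
    LZ4_streamDecode_t *lsd = LZ4_createStreamDecode();
    char block[4096];
    int total = 0;

    if (!lsd || !LZ4_setStreamDecode(lsd, NULL, 0))
    {
        LZ4_freeStreamDecode(lsd);      /* free on NULL is allowed */
        return -1;
    }
    for (;;)
    {
        const int blockSize = read_block(block, (int)sizeof(block));
        if (blockSize <= 0)
        {
            break;
        }
        const int dSize = LZ4_decompress_safe_continue(lsd, block, out + total,
                                                       blockSize,
                                                       outCapacity - total);
        if (dSize < 0)
        {
            total = -1;                 /* malformed block or output too small */
            break;
        }
        total += dSize;
    }
    LZ4_freeStreamDecode(lsd);
    return total;
}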
/*! LZ4_decompress_*_usingDict() :
* These decoding functions work the same as
* a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue()
* They are stand-alone, and don't need an LZ4_streamDecode_t structure.
+ * Dictionary is presumed stable : it must remain accessible and unmodified during decompression.
+ * Performance tip : Decompression speed can be substantially increased
+ * when dst == dictStart + dictSize.
*/
-LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize);
-LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize);
+LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapacity, const char* dictStart, int dictSize);
+#endif /* LZ4_H_2983827168210 */
-/*^**********************************************
+
+/*^*************************************
* !!!!!! STATIC LINKING ONLY !!!!!!
- ***********************************************/
-/*-************************************
- * Private definitions
- **************************************
- * Do not use these definitions.
- * They are exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
- * Using these definitions will expose code to API and/or ABI break in future versions of the library.
- **************************************/
+ ***************************************/
+
+/*-****************************************************************************
+ * Experimental section
+ *
+ * Symbols declared in this section must be considered unstable. Their
+ * signatures or semantics may change, or they may be removed altogether in the
+ * future. They are therefore only safe to depend on when the caller is
+ * statically linked against the library.
+ *
+ * To protect against unsafe usage, not only are the declarations guarded,
+ * the definitions are hidden by default
+ * when building LZ4 as a shared/dynamic library.
+ *
+ * In order to access these declarations,
+ * define LZ4_STATIC_LINKING_ONLY in your application
+ * before including LZ4's headers.
+ *
+ * In order to make their implementations accessible dynamically, you must
+ * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
+ ******************************************************************************/
+
+#ifdef LZ4_STATIC_LINKING_ONLY
+
+#ifndef LZ4_STATIC_3504398509
+#define LZ4_STATIC_3504398509
+
+#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
+#define LZ4LIB_STATIC_API LZ4LIB_API
+#else
+#define LZ4LIB_STATIC_API
+#endif
+
+
+/*! LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step.
+ * It is only safe to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized").
+ * From a high level, the difference is that
+ * this function initializes the provided state with a call to something like LZ4_resetStream_fast()
+ * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+/*! LZ4_attach_dictionary() :
+ * This is an experimental API that allows
+ * efficient use of a static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
+ * working LZ4_stream_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDict() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionaryStream may be NULL,
+ * in which case any existing dictionary stream is unset.
+ *
+ * If a dictionary is provided, it replaces any pre-existing stream history.
+ * The dictionary contents are the only history that can be referenced and
+ * logically immediately precede the data compressed in the first subsequent
+ * compression call.
+ *
+ * The dictionary will only remain attached to the working stream through the
+ * first compression call, at the end of which it is cleared. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the completion of the first compression call on the stream.
+ */
+LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream);
+
+
+/*! In-place compression and decompression
+ *
+ * It's possible to have input and output sharing the same buffer,
+ * for highly constrained memory environments.
+ * In both cases, it requires the input to lie at the end of the buffer,
+ * and decompression to start at the beginning of the buffer.
+ * Buffer size must feature some margin, hence be larger than final size.
+ *
+ * |<------------------------buffer--------------------------------->|
+ * |<-----------compressed data--------->|
+ * |<-----------decompressed size------------------>|
+ * |<----margin---->|
+ *
+ * This technique is more useful for decompression,
+ * since decompressed size is typically larger,
+ * and margin is short.
+ *
+ * In-place decompression will work inside any buffer
+ * whose size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
+ * This presumes that decompressedSize > compressedSize.
+ * Otherwise, it means compression actually expanded data,
+ * and it would be more efficient to store such data with a flag indicating it's not compressed.
+ * This can happen when data is not compressible (already compressed, or encrypted).
+ *
+ * For in-place compression, margin is larger, as it must be able to cope with both
+ * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX,
+ * and data expansion, which can happen when input is not compressible.
+ * As a consequence, buffer size requirements are much higher,
+ * and memory savings offered by in-place compression are more limited.
+ *
+ * There are ways to limit this cost for compression :
+ * - Reduce history size, by modifying LZ4_DISTANCE_MAX.
+ * Note that it is a compile-time constant, so all compressions will apply this limit.
+ * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX,
+ * so it's a reasonable trick when inputs are known to be small.
+ * - Require the compressor to deliver a "maximum compressed size".
+ * This is the `dstCapacity` parameter in `LZ4_compress*()`.
+ * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail,
+ * in which case, the return code will be 0 (zero).
+ * The caller must be ready for these cases to happen,
+ * and typically design a backup scheme to send data uncompressed.
+ * The combination of both techniques can significantly reduce
+ * the amount of margin required for in-place compression.
+ *
+ * In-place compression can work in any buffer
+ * whose size is >= (maxCompressedSize)
+ * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success.
+ * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX,
+ * so it's possible to reduce memory requirements by playing with them.
+ */
+
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
+#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */
+
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
+# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
+#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
+#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */
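/* Sketch (illustrative): in-place decompression. The compressed block is moved
 * to the tail of a buffer sized with LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(), and
 * decoding writes from the head of the same buffer. The macro lives in the
 * static-only section, so LZ4_STATIC_LINKING_ONLY must be defined first. */
#include <string.h>
#define LZ4_STATIC_LINKING_ONLY
#include "compat-lz4.h"

static int
decompress_in_place(char *buf, size_t bufSize,
                    int compressedSize, int decompressedSize)
{
    if (bufSize < (size_t)LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize))
    {
        return -1;      /* not enough margin for in-place decoding */
    }
    /* move compressed data to the end of the buffer, then decode to its start */
    char *srcPos = buf + bufSize - (size_t)compressedSize;
    memmove(srcPos, buf, (size_t)compressedSize);
    return LZ4_decompress_safe(srcPos, buf, compressedSize, decompressedSize);
}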
+
+#endif /* LZ4_STATIC_3504398509 */
+#endif /* LZ4_STATIC_LINKING_ONLY */
+
+
+
+#ifndef LZ4_H_98237428734687
+#define LZ4_H_98237428734687
+
+/*-************************************************************
+ * PRIVATE DEFINITIONS
+ **************************************************************
+ * Do not use these definitions directly.
+ * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
+ * Accessing members will expose code to API and/or ABI break in future versions of the library.
+ **************************************************************/
#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
@@ -332,14 +560,16 @@ LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* source, char* dest, in
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#include <stdint.h>
-typedef struct {
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
+struct LZ4_stream_t_internal {
uint32_t hashTable[LZ4_HASH_SIZE_U32];
uint32_t currentOffset;
- uint32_t initCheck;
+ uint16_t dirty;
+ uint16_t tableType;
const uint8_t* dictionary;
- uint8_t* bufferStart; /* obsolete, used for slideInputBuffer */
+ const LZ4_stream_t_internal* dictCtx;
uint32_t dictSize;
-} LZ4_stream_t_internal;
+};
typedef struct {
const uint8_t* externalDict;
@@ -350,49 +580,67 @@ typedef struct {
#else
-typedef struct {
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
+struct LZ4_stream_t_internal {
unsigned int hashTable[LZ4_HASH_SIZE_U32];
unsigned int currentOffset;
- unsigned int initCheck;
+ unsigned short dirty;
+ unsigned short tableType;
const unsigned char* dictionary;
- unsigned char* bufferStart; /* obsolete, used for slideInputBuffer */
+ const LZ4_stream_t_internal* dictCtx;
unsigned int dictSize;
-} LZ4_stream_t_internal;
+};
typedef struct {
const unsigned char* externalDict;
- size_t extDictSize;
const unsigned char* prefixEnd;
+ size_t extDictSize;
size_t prefixSize;
} LZ4_streamDecode_t_internal;
#endif
-/*!
- * LZ4_stream_t :
- * information structure to track an LZ4 stream.
- * init this structure before first use.
- * note : only use in association with static linking !
- * this definition is not API/ABI safe,
- * and may change in a future version !
+/*! LZ4_stream_t :
+ * information structure to track an LZ4 stream.
+ * LZ4_stream_t can also be created using LZ4_createStream(), which is recommended.
+ * The structure definition can be convenient for static allocation
+ * (on stack, or as part of larger structure).
+ * Init this structure with LZ4_initStream() before first use.
+ * note : only use this definition in association with static linking !
+ * this definition is not API/ABI safe, and may change in a future version.
*/
-#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4)
+#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4 + ((sizeof(void*)==16) ? 4 : 0) /*AS-400*/ )
#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long))
union LZ4_stream_u {
unsigned long long table[LZ4_STREAMSIZE_U64];
LZ4_stream_t_internal internal_donotuse;
} ; /* previously typedef'd to LZ4_stream_t */
+/*! LZ4_initStream() : v1.9.0+
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is automatically done when invoking LZ4_createStream(),
+ * but it's not when the structure is simply declared on stack (for example).
+ *
+ * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
+ * It can also initialize any arbitrary buffer of sufficient size,
+ * and will @return a pointer of proper type upon initialization.
+ *
+ * Note : initialization fails if size and alignment conditions are not respected.
+ * In which case, the function will @return NULL.
+ * Note2: An LZ4_stream_t structure guarantees correct alignment and size.
+ * Note3: Before v1.9.0, use LZ4_resetStream() instead
+ */
+LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
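/* Sketch (illustrative): a stack-declared LZ4_stream_t must be initialized
 * once with LZ4_initStream() before its first use; subsequent streams can then
 * be started with the cheaper LZ4_resetStream_fast(). */
#include "compat-lz4.h"

static int
compress_on_stack(const char *src, int srcSize, char *dst, int dstCapacity)
{
    LZ4_stream_t stream;
    if (LZ4_initStream(&stream, sizeof(stream)) == NULL)
    {
        return -1;      /* size/alignment conditions not met */
    }
    /* a fresh stream compresses this first block with no prior history */
    return LZ4_compress_fast_continue(&stream, src, dst, srcSize, dstCapacity, 1);
}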
-/*!
- * LZ4_streamDecode_t :
- * information structure to track an LZ4 stream during decompression.
- * init this structure using LZ4_setStreamDecode (or memset()) before first use
- * note : only use in association with static linking !
- * this definition is not API/ABI safe,
- * and may change in a future version !
+
+/*! LZ4_streamDecode_t :
+ * information structure to track an LZ4 stream during decompression.
+ * init this structure using LZ4_setStreamDecode() before first use.
+ * note : only use in association with static linking !
+ * this definition is not API/ABI safe,
+ * and may change in a future version !
*/
-#define LZ4_STREAMDECODESIZE_U64 4
+#define LZ4_STREAMDECODESIZE_U64 (4 + ((sizeof(void*)==16) ? 2 : 0) /*AS-400*/ )
#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
union LZ4_streamDecode_u {
unsigned long long table[LZ4_STREAMDECODESIZE_U64];
@@ -400,15 +648,22 @@ union LZ4_streamDecode_u {
} ; /* previously typedef'd to LZ4_streamDecode_t */
-/*=************************************
+
+/*-************************************
* Obsolete Functions
**************************************/
-/* Deprecation warnings */
-/* Should these warnings be a problem,
- it is generally possible to disable them,
- typically with -Wno-deprecated-declarations for gcc
- or _CRT_SECURE_NO_WARNINGS in Visual.
- Otherwise, it's also possible to define LZ4_DISABLE_DEPRECATE_WARNINGS */
+
+/*! Deprecation warnings
+ *
+ * Deprecated functions make the compiler generate a warning when invoked.
+ * This is meant to invite users to update their source code.
+ * Should deprecation warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc
+ * or _CRT_SECURE_NO_WARNINGS in Visual.
+ *
+ * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS
+ * before including the header file.
+ */
#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
# define LZ4_DEPRECATED(message) /* disable deprecation warnings */
#else
@@ -428,36 +683,82 @@ union LZ4_streamDecode_u {
#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
/* Obsolete compression functions */
-LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress (const char* source, char* dest, int sourceSize);
-LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
/* Obsolete decompression functions */
-/* These function names are completely deprecated and must no longer be used.
- They are only provided in lz4.c for compatibility with older programs.
- - LZ4_uncompress is the same as LZ4_decompress_fast
- - LZ4_uncompress_unknownOutputSize is the same as LZ4_decompress_safe
- These function prototypes are now disabled; uncomment them only if you really need them.
- It is highly recommended to stop using these prototypes and migrate to maintained ones */
-/* int LZ4_uncompress (const char* source, char* dest, int outputSize); */
-/* int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); */
-
-/* Obsolete streaming functions; use new streaming interface whenever possible */
-LZ4_DEPRECATED("use LZ4_createStream() instead") void* LZ4_create (char* inputBuffer);
-LZ4_DEPRECATED("use LZ4_createStream() instead") int LZ4_sizeofStreamState(void);
-LZ4_DEPRECATED("use LZ4_resetStream() instead") int LZ4_resetStreamState(void* state, char* inputBuffer);
-LZ4_DEPRECATED("use LZ4_saveDict() instead") char* LZ4_slideInputBuffer (void* state);
+LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
+LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
+
+/* Obsolete streaming functions; degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, they don't
+ * actually retain any history between compression calls. The compression ratio
+ * achieved will therefore be no better than compressing each chunk
+ * independently.
+ */
+LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer);
+LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void);
+LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer);
+LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state);
/* Obsolete streaming decoding functions */
-LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
-LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
+LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
+LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
+
+/*! LZ4_decompress_fast() : **unsafe!**
+ * These functions used to be faster than LZ4_decompress_safe(),
+ * but that has changed, and they are now slower than LZ4_decompress_safe().
+ * This is because LZ4_decompress_fast() doesn't know the input size,
+ * and therefore must progress more cautiously in the input buffer to not read beyond the end of block.
+ * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
+ * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
+ *
+ * The last remaining LZ4_decompress_fast() specificity is that
+ * it can decompress a block without knowing its compressed size.
+ * Such functionality could be achieved in a more secure manner,
+ * by also providing the maximum size of input buffer,
+ * but it would require new prototypes, and adaptation of the implementation to this new use case.
+ *
+ * Parameters:
+ * originalSize : is the uncompressed size to regenerate.
+ * `dst` must be already allocated, its size must be >= 'originalSize' bytes.
+ * @return : number of bytes read from source buffer (== compressed size).
+ * The function expects to finish at block's end exactly.
+ * If the source stream is detected malformed, the function stops decoding and returns a negative result.
+ * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer.
+ * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds.
+ * Also, since match offsets are not validated, match reads from 'src' may underflow too.
+ * These issues never happen if input (compressed) data is correct.
+ * But they may happen if input data is invalid (error or intentional tampering).
+ * As a consequence, use these functions in trusted environments with trusted data **only**.
+ */
+
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
+LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
+LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize);
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead")
+LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize);
+
+/*! LZ4_resetStream() :
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is done with LZ4_initStream(), or LZ4_resetStream().
+ * Consider switching to LZ4_initStream(),
+ * invoking LZ4_resetStream() will trigger deprecation warnings in the future.
+ */
+LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
+
+
+#endif /* LZ4_H_98237428734687 */
#if defined (__cplusplus)
}
#endif
-
-#endif /* LZ4_H_2983827168210 */
diff --git a/src/openvpn/errlevel.h b/src/openvpn/errlevel.h
index e448fc3..5663f84 100644
--- a/src/openvpn/errlevel.h
+++ b/src/openvpn/errlevel.h
@@ -91,6 +91,7 @@
#define D_OSBUF LOGLEV(3, 43, 0) /* show socket/tun/tap buffer sizes */
#define D_PS_PROXY LOGLEV(3, 44, 0) /* messages related to --port-share option */
#define D_PF_INFO LOGLEV(3, 45, 0) /* packet filter informational messages */
+#define D_IFCONFIG LOGLEV(3, 0, 0) /* show ifconfig info (don't mute) */
#define D_SHOW_PARMS LOGLEV(4, 50, 0) /* show all parameters on program initiation */
#define D_SHOW_OCC LOGLEV(4, 51, 0) /* show options compatibility string */
diff --git a/src/openvpn/init.c b/src/openvpn/init.c
index a785934..31ecadc 100644
--- a/src/openvpn/init.c
+++ b/src/openvpn/init.c
@@ -500,6 +500,17 @@ next_connection_entry(struct context *c)
*/
if (!c->options.persist_remote_ip)
{
+ /* Connection entry addrinfo objects might have been
+ * resolved earlier but the entry itself might have been
+ * skipped by management on the previous loop.
+ * If so, clear the addrinfo objects as close_instance does
+ */
+ if (c->c1.link_socket_addr.remote_list)
+ {
+ clear_remote_addrlist(&c->c1.link_socket_addr,
+ !c->options.resolve_in_advance);
+ }
+
/* close_instance should have cleared the addrinfo objects */
ASSERT(c->c1.link_socket_addr.current_remote == NULL);
ASSERT(c->c1.link_socket_addr.remote_list == NULL);
@@ -3635,7 +3646,8 @@ do_close_link_socket(struct context *c)
&& ( (c->options.persist_remote_ip)
||
( c->sig->source != SIG_SOURCE_HARD
- && ((c->c1.link_socket_addr.current_remote && c->c1.link_socket_addr.current_remote->ai_next)
+ && ((c->c1.link_socket_addr.current_remote
+ && c->c1.link_socket_addr.current_remote->ai_next)
|| c->options.no_advance))
)))
{
diff --git a/src/openvpn/manage.c b/src/openvpn/manage.c
index 898cb3b..ac14217 100644
--- a/src/openvpn/manage.c
+++ b/src/openvpn/manage.c
@@ -3310,12 +3310,17 @@ man_block(struct management *man, volatile int *signal_received, const time_t ex
if (man_standalone_ok(man))
{
+ /* The expire time may already be overdue; in that case start with a zero
+ * timeout so the first pass does not wait, and exit the loop early with
+ * either an obtained event or a timeout.
+ */
+ tv.tv_usec = 0;
+ tv.tv_sec = 0;
+
while (true)
{
event_reset(man->connection.es);
management_socket_set(man, man->connection.es, NULL, NULL);
- tv.tv_usec = 0;
- tv.tv_sec = 1;
if (man_check_for_signals(signal_received))
{
status = -1;
@@ -3343,6 +3348,10 @@ man_block(struct management *man, volatile int *signal_received, const time_t ex
}
break;
}
+
+ /* wait one second more */
+ tv.tv_sec = 1;
+ tv.tv_usec = 0;
}
}
return status;
@@ -3444,7 +3453,7 @@ management_event_loop_n_seconds(struct management *man, int sec)
/* set expire time */
update_time();
- if (sec)
+ if (sec >= 0)
{
expire = now + sec;
}
@@ -3474,7 +3483,7 @@ management_event_loop_n_seconds(struct management *man, int sec)
/* revert state */
man->persist.standalone_disabled = standalone_disabled_save;
}
- else
+ else if (sec > 0)
{
sleep(sec);
}
@@ -4117,11 +4126,15 @@ log_history_ref(const struct log_history *h, const int index)
void
management_sleep(const int n)
{
- if (management)
+ if (n < 0)
+ {
+ return;
+ }
+ else if (management)
{
management_event_loop_n_seconds(management, n);
}
- else
+ else if (n > 0)
{
sleep(n);
}
@@ -4132,7 +4145,10 @@ management_sleep(const int n)
void
management_sleep(const int n)
{
- sleep(n);
+ if (n > 0)
+ {
+ sleep(n);
+ }
}
#endif /* ENABLE_MANAGEMENT */
diff --git a/src/openvpn/networking_iproute2.c b/src/openvpn/networking_iproute2.c
index f3b9c61..3b46052 100644
--- a/src/openvpn/networking_iproute2.c
+++ b/src/openvpn/networking_iproute2.c
@@ -88,6 +88,8 @@ net_iface_mtu_set(openvpn_net_ctx_t *ctx, const char *iface, uint32_t mtu)
argv_msg(M_INFO, &argv);
openvpn_execve_check(&argv, ctx->es, S_FATAL, "Linux ip link set failed");
+ argv_free(&argv);
+
return 0;
}
diff --git a/src/openvpn/networking_sitnl.c b/src/openvpn/networking_sitnl.c
index 713a213..2bc70a5 100644
--- a/src/openvpn/networking_sitnl.c
+++ b/src/openvpn/networking_sitnl.c
@@ -345,6 +345,13 @@ sitnl_send(struct nlmsghdr *payload, pid_t peer, unsigned int groups,
* continue;
* }
*/
+
+ if (h->nlmsg_type == NLMSG_DONE)
+ {
+ ret = 0;
+ goto out;
+ }
+
if (h->nlmsg_type == NLMSG_ERROR)
{
err = (struct nlmsgerr *)NLMSG_DATA(h);
@@ -360,7 +367,11 @@ sitnl_send(struct nlmsghdr *payload, pid_t peer, unsigned int groups,
ret = 0;
if (cb)
{
- ret = cb(h, arg_cb);
+ int r = cb(h, arg_cb);
+ if (r <= 0)
+ {
+ ret = r;
+ }
}
}
else
@@ -375,8 +386,12 @@ sitnl_send(struct nlmsghdr *payload, pid_t peer, unsigned int groups,
if (cb)
{
- ret = cb(h, arg_cb);
- goto out;
+ int r = cb(h, arg_cb);
+ if (r <= 0)
+ {
+ ret = r;
+ goto out;
+ }
}
else
{
@@ -410,6 +425,7 @@ typedef struct {
int addr_size;
inet_address_t gw;
char iface[IFNAMSIZ];
+ bool default_only;
} route_res_t;
static int
@@ -421,6 +437,12 @@ sitnl_route_save(struct nlmsghdr *n, void *arg)
int len = n->nlmsg_len - NLMSG_LENGTH(sizeof(*r));
unsigned int ifindex = 0;
+ /* filter out non-zero dst prefixes */
+ if (res->default_only && r->rtm_dst_len != 0)
+ {
+ return 1;
+ }
+
while (RTA_OK(rta, len))
{
switch (rta->rta_type)
@@ -477,11 +499,25 @@ sitnl_route_best_gw(sa_family_t af_family, const inet_address_t *dst,
{
case AF_INET:
res.addr_size = sizeof(in_addr_t);
- req.n.nlmsg_flags |= NLM_F_DUMP;
+ /*
+ * kernel can't return 0.0.0.0/8 host route, dump all
+ * the routes and filter for 0.0.0.0/0 in cb()
+ */
+ if (!dst || !dst->ipv4)
+ {
+ req.n.nlmsg_flags |= NLM_F_DUMP;
+ res.default_only = true;
+ }
+ else
+ {
+ req.r.rtm_dst_len = 32;
+ }
break;
case AF_INET6:
res.addr_size = sizeof(struct in6_addr);
+ /* kernel can return ::/128 host route */
+ req.r.rtm_dst_len = 128;
break;
default:
diff --git a/src/openvpn/openvpn.vcxproj b/src/openvpn/openvpn.vcxproj
index 5367979..3863854 100644
--- a/src/openvpn/openvpn.vcxproj
+++ b/src/openvpn/openvpn.vcxproj
@@ -92,7 +92,7 @@
</ClCompile>
<ResourceCompile />
<Link>
- <AdditionalDependencies>legacy_stdio_definitions.lib;Ncrypt.lib;libssl.lib;libcrypto.lib;lzo2.lib;pkcs11-helper.dll.lib;gdi32.lib;ws2_32.lib;wininet.lib;crypt32.lib;iphlpapi.lib;winmm.lib;Fwpuclnt.lib;Rpcrt4.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalDependencies>legacy_stdio_definitions.lib;Ncrypt.lib;libssl.lib;libcrypto.lib;lzo2.lib;pkcs11-helper.dll.lib;gdi32.lib;ws2_32.lib;wininet.lib;crypt32.lib;iphlpapi.lib;winmm.lib;Fwpuclnt.lib;Rpcrt4.lib;setupapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(OPENSSL_HOME)/lib;$(LZO_HOME)/lib;$(PKCS11H_HOME)/lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<SubSystem>Console</SubSystem>
</Link>
@@ -122,7 +122,7 @@
</ClCompile>
<ResourceCompile />
<Link>
- <AdditionalDependencies>legacy_stdio_definitions.lib;Ncrypt.lib;libssl.lib;libcrypto.lib;lzo2.lib;pkcs11-helper.dll.lib;gdi32.lib;ws2_32.lib;wininet.lib;crypt32.lib;iphlpapi.lib;winmm.lib;Fwpuclnt.lib;Rpcrt4.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalDependencies>legacy_stdio_definitions.lib;Ncrypt.lib;libssl.lib;libcrypto.lib;lzo2.lib;pkcs11-helper.dll.lib;gdi32.lib;ws2_32.lib;wininet.lib;crypt32.lib;iphlpapi.lib;winmm.lib;Fwpuclnt.lib;Rpcrt4.lib;setupapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(OPENSSL_HOME)/lib;$(LZO_HOME)/lib;$(PKCS11H_HOME)/lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<SubSystem>Console</SubSystem>
</Link>
diff --git a/src/openvpn/options.c b/src/openvpn/options.c
index 8bf82c5..658ca53 100644
--- a/src/openvpn/options.c
+++ b/src/openvpn/options.c
@@ -1983,7 +1983,8 @@ connection_entry_load_re(struct connection_entry *ce, const struct remote_entry
}
static void
-options_postprocess_verify_ce(const struct options *options, const struct connection_entry *ce)
+options_postprocess_verify_ce(const struct options *options,
+ const struct connection_entry *ce)
{
struct options defaults;
int dev = DEV_TYPE_UNDEF;
@@ -2011,7 +2012,9 @@ options_postprocess_verify_ce(const struct options *options, const struct connec
*/
if (ce->proto == PROTO_TCP)
{
- msg(M_USAGE, "--proto tcp is ambiguous in this context. Please specify --proto tcp-server or --proto tcp-client");
+ msg(M_USAGE,
+ "--proto tcp is ambiguous in this context. Please specify "
+ "--proto tcp-server or --proto tcp-client");
}
/*
@@ -2051,8 +2054,9 @@ options_postprocess_verify_ce(const struct options *options, const struct connec
if (options->inetd)
{
- msg(M_WARN, "DEPRECATED OPTION: --inetd mode is deprecated "
- "and will be removed in OpenVPN 2.6");
+ msg(M_WARN,
+ "DEPRECATED OPTION: --inetd mode is deprecated and will be removed "
+ "in OpenVPN 2.6");
}
if (options->lladdr && dev != DEV_TYPE_TAP)
@@ -2065,7 +2069,9 @@ options_postprocess_verify_ce(const struct options *options, const struct connec
*/
if (options->ce.tun_mtu_defined && options->ce.link_mtu_defined)
{
- msg(M_USAGE, "only one of --tun-mtu or --link-mtu may be defined (note that --ifconfig implies --link-mtu %d)", LINK_MTU_DEFAULT);
+ msg(M_USAGE,
+ "only one of --tun-mtu or --link-mtu may be defined (note that "
+ "--ifconfig implies --link-mtu %d)", LINK_MTU_DEFAULT);
}
if (!proto_is_udp(ce->proto) && options->mtu_test)
@@ -2092,18 +2098,23 @@ options_postprocess_verify_ce(const struct options *options, const struct connec
if (string_defined_equal(ce->remote, options->ifconfig_local)
|| string_defined_equal(ce->remote, options->ifconfig_remote_netmask))
{
- msg(M_USAGE, "--local and --remote addresses must be distinct from --ifconfig addresses");
+ msg(M_USAGE,
+ "--local and --remote addresses must be distinct from --ifconfig "
+ "addresses");
}
if (string_defined_equal(ce->local, options->ifconfig_local)
|| string_defined_equal(ce->local, options->ifconfig_remote_netmask))
{
- msg(M_USAGE, "--local addresses must be distinct from --ifconfig addresses");
+ msg(M_USAGE,
+ "--local addresses must be distinct from --ifconfig addresses");
}
- if (string_defined_equal(options->ifconfig_local, options->ifconfig_remote_netmask))
+ if (string_defined_equal(options->ifconfig_local,
+ options->ifconfig_remote_netmask))
{
- msg(M_USAGE, "local and remote/netmask --ifconfig addresses must be different");
+ msg(M_USAGE,
+ "local and remote/netmask --ifconfig addresses must be different");
}
if (ce->bind_defined && !ce->bind_local)
@@ -2113,12 +2124,14 @@ options_postprocess_verify_ce(const struct options *options, const struct connec
if (ce->local && !ce->bind_local)
{
- msg(M_USAGE, "--local and --nobind don't make sense when used together");
+ msg(M_USAGE,
+ "--local and --nobind don't make sense when used together");
}
if (ce->local_port_defined && !ce->bind_local)
{
- msg(M_USAGE, "--lport and --nobind don't make sense when used together");
+ msg(M_USAGE,
+ "--lport and --nobind don't make sense when used together");
}
if (!ce->remote && !ce->bind_local)
@@ -2181,10 +2194,11 @@ options_postprocess_verify_ce(const struct options *options, const struct connec
}
if (options->tuntap_options.dhcp_options
+ && options->windows_driver != WINDOWS_DRIVER_WINTUN
&& options->tuntap_options.ip_win32_type != IPW32_SET_DHCP_MASQ
&& options->tuntap_options.ip_win32_type != IPW32_SET_ADAPTIVE)
{
- msg(M_USAGE, "--dhcp-options requires --ip-win32 dynamic or adaptive");
+ msg(M_USAGE, "--dhcp-option requires --ip-win32 dynamic or adaptive");
}
if (options->windows_driver == WINDOWS_DRIVER_WINTUN && dev != DEV_TYPE_TUN)
@@ -2206,7 +2220,8 @@ options_postprocess_verify_ce(const struct options *options, const struct connec
if (!proto_is_udp(ce->proto) && ce->explicit_exit_notification)
{
- msg(M_USAGE, "--explicit-exit-notify can only be used with --proto udp");
+ msg(M_USAGE,
+ "--explicit-exit-notify can only be used with --proto udp");
}
if (!ce->remote && ce->proto == PROTO_TCP_CLIENT)
@@ -2216,16 +2231,21 @@ options_postprocess_verify_ce(const struct options *options, const struct connec
if ((ce->http_proxy_options) && ce->proto != PROTO_TCP_CLIENT)
{
- msg(M_USAGE, "--http-proxy MUST be used in TCP Client mode (i.e. --proto tcp-client)");
+ msg(M_USAGE,
+ "--http-proxy MUST be used in TCP Client mode (i.e. --proto "
+ "tcp-client)");
}
+
if ((ce->http_proxy_options) && !ce->http_proxy_options->server)
{
- msg(M_USAGE, "--http-proxy not specified but other http proxy options present");
+ msg(M_USAGE,
+ "--http-proxy not specified but other http proxy options present");
}
if (ce->http_proxy_options && ce->socks_proxy_server)
{
- msg(M_USAGE, "--http-proxy can not be used together with --socks-proxy");
+ msg(M_USAGE,
+ "--http-proxy can not be used together with --socks-proxy");
}
if (ce->socks_proxy_server && ce->proto == PROTO_TCP_SERVER)
@@ -2291,8 +2311,9 @@ options_postprocess_verify_ce(const struct options *options, const struct connec
{
msg(M_USAGE, "--socks-proxy cannot be used with --mode server");
}
- /* <connection> blocks force to have a remote embedded, so we check for the
- * --remote and bail out if it is present */
+ /* <connection> blocks force to have a remote embedded, so we check
+ * for the --remote and bail out if it is present
+ */
if (options->connection_list->len >1
|| options->connection_list->array[0]->remote)
{
@@ -2309,12 +2330,15 @@ options_postprocess_verify_ce(const struct options *options, const struct connec
}
if (options->ipchange)
{
- msg(M_USAGE, "--ipchange cannot be used with --mode server (use --client-connect instead)");
+ msg(M_USAGE,
+ "--ipchange cannot be used with --mode server (use "
+ "--client-connect instead)");
}
if (!(proto_is_dgram(ce->proto) || ce->proto == PROTO_TCP_SERVER))
{
- msg(M_USAGE, "--mode server currently only supports "
- "--proto udp or --proto tcp-server or --proto tcp6-server");
+ msg(M_USAGE,
+ "--mode server currently only supports --proto udp or --proto "
+ "tcp-server or --proto tcp6-server");
}
if (!proto_is_udp(ce->proto) && (options->cf_max || options->cf_per))
{
@@ -2816,12 +2840,14 @@ options_postprocess_mutate_ce(struct options *o, struct connection_entry *ce)
}
#endif
- if (ce->proto == PROTO_TCP_CLIENT && !ce->local && !ce->local_port_defined && !ce->bind_defined)
+ if (ce->proto == PROTO_TCP_CLIENT && !ce->local
+ && !ce->local_port_defined && !ce->bind_defined)
{
ce->bind_local = false;
}
- if (ce->proto == PROTO_UDP && ce->socks_proxy_server && !ce->local && !ce->local_port_defined && !ce->bind_defined)
+ if (ce->proto == PROTO_UDP && ce->socks_proxy_server && !ce->local
+ && !ce->local_port_defined && !ce->bind_defined)
{
ce->bind_local = false;
}
@@ -2831,7 +2857,9 @@ options_postprocess_mutate_ce(struct options *o, struct connection_entry *ce)
ce->local_port = NULL;
}
- /* if protocol forcing is enabled, disable all protocols except for the forced one */
+ /* if protocol forcing is enabled, disable all protocols
+ * except for the forced one
+ */
if (o->proto_force >= 0 && o->proto_force != ce->proto)
{
ce->flags |= CE_DISABLED;
@@ -5689,7 +5717,9 @@ add_option(struct options *options,
const sa_family_t af = ascii2af(p[3]);
if (proto < 0)
{
- msg(msglevel, "remote: bad protocol associated with host %s: '%s'", p[1], p[3]);
+ msg(msglevel,
+ "remote: bad protocol associated with host %s: '%s'",
+ p[1], p[3]);
goto err;
}
re.proto = proto;
@@ -6209,7 +6239,8 @@ add_option(struct options *options,
af = ascii2af(p[1]);
if (proto < 0)
{
- msg(msglevel, "Bad protocol: '%s'. Allowed protocols with --proto option: %s",
+ msg(msglevel,
+ "Bad protocol: '%s'. Allowed protocols with --proto option: %s",
p[1],
proto2ascii_all(&gc));
goto err;
@@ -7439,7 +7470,8 @@ add_option(struct options *options,
VERIFY_PERMISSION(OPT_P_IPWIN32);
bool ipv6dns = false;
- if (streq(p[1], "DOMAIN") && p[2])
+ if ((streq(p[1], "DOMAIN") || streq(p[1], "ADAPTER_DOMAIN_SUFFIX"))
+ && p[2])
{
o->domain = p[2];
}
diff --git a/src/openvpn/otime.h b/src/openvpn/otime.h
index a6f7ec2..78d20ba 100644
--- a/src/openvpn/otime.h
+++ b/src/openvpn/otime.h
@@ -84,6 +84,7 @@ update_time(void)
openvpn_gettimeofday(&tv, NULL);
#else
update_now(time(NULL));
+ now_usec = 0;
#endif
}
diff --git a/src/openvpn/pool.c b/src/openvpn/pool.c
index 1f74ac5..ece0784 100644
--- a/src/openvpn/pool.c
+++ b/src/openvpn/pool.c
@@ -224,6 +224,24 @@ ifconfig_pool_init(const bool ipv4_pool, enum pool_type type, in_addr_t start,
}
pool->ipv6.base = ipv6_base;
+
+ /* if a pool starts at a base address that has all-zero in the
+ * host part, that first IPv6 address must not be assigned to
+ * clients because it is not usable (subnet anycast address).
+ * Start with 1, then.
+ *
+ * NOTE: this will also (mis-)fire for something like
+ * ifconfig-ipv6-pool 2001:db8:0:1:1234::0/64
+ * as we only check the rightmost 32 bits of the host part. So be it.
+ */
+ if (base == 0)
+ {
+ msg(D_IFCONFIG_POOL, "IFCONFIG POOL IPv6: incrementing pool start "
+ "to avoid ::0 assignment");
+ base++;
+ pool->ipv6.base.s6_addr[15]++;
+ }
+
pool_ipv6_size = ipv6_netbits >= 112
? (1 << (128 - ipv6_netbits)) - base
: IFCONFIG_POOL_MAX;
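
The pool.c comment above explains why the first address of an IPv6 pool must be skipped when the host part of the base is all-zero (it is the subnet anycast address), and notes that only the rightmost 32 bits are inspected. A small sketch of that check, assuming the host offset is tracked as a separate 32-bit value as in the hunk:

#include <stdint.h>
#include <netinet/in.h>

/* Sketch: if the 32-bit host offset is zero, advance both the offset
 * and the last byte of the stored base address so ::0 is never handed
 * out. Shares the "rightmost 32 bits only" limitation noted above. */
static void
skip_ipv6_anycast_base(struct in6_addr *pool_base, uint32_t *base_offset)
{
    if (*base_offset == 0)
    {
        (*base_offset)++;
        pool_base->s6_addr[15]++;
    }
}
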
diff --git a/src/openvpn/route.c b/src/openvpn/route.c
index f127a90..5e1dca6 100644
--- a/src/openvpn/route.c
+++ b/src/openvpn/route.c
@@ -49,6 +49,10 @@
#include <linux/rtnetlink.h> /* RTM_GETROUTE etc. */
#endif
+#if defined(TARGET_NETBSD)
+#include <net/route.h> /* RT_ROUNDUP(), RT_ADVANCE() */
+#endif
+
#ifdef _WIN32
#include "openvpn-msg.h"
@@ -323,6 +327,10 @@ init_route(struct route_ipv4 *r,
if (get_special_addr(rl, ro->network, (in_addr_t *) &special.s_addr, &status))
{
+ if (!status)
+ {
+ goto fail;
+ }
special.s_addr = htonl(special.s_addr);
ret = openvpn_getaddrinfo(0, inet_ntoa(special), NULL, 0, NULL,
AF_INET, network_list);
@@ -619,7 +627,7 @@ init_route_list(struct route_list *rl,
rl->flags = opt->flags;
- if (remote_host)
+ if (remote_host != IPV4_INVALID_ADDR)
{
rl->spec.remote_host = remote_host;
rl->spec.flags |= RTSA_REMOTE_HOST;
@@ -1003,14 +1011,10 @@ redirect_default_route_to_vpn(struct route_list *rl, const struct tuntap *tt,
* - we are connecting to a non-IPv4 remote host (i.e. we use IPv6)
*/
else if (!(rl->rgi.flags & RGI_ADDR_DEFINED) && !local
- && (rl->spec.remote_host != IPV4_INVALID_ADDR))
+ && (rl->spec.flags & RTSA_REMOTE_HOST))
{
msg(M_WARN, "%s Cannot read current default gateway from system", err);
}
- else if (!(rl->spec.flags & RTSA_REMOTE_HOST))
- {
- msg(M_WARN, "%s Cannot obtain current remote host address", err);
- }
else
{
#ifndef TARGET_ANDROID
@@ -1033,7 +1037,8 @@ redirect_default_route_to_vpn(struct route_list *rl, const struct tuntap *tt,
/* route remote host to original default gateway */
/* if remote_host is not ipv4 (ie: ipv6), just skip
* adding this special /32 route */
- if (rl->spec.remote_host != IPV4_INVALID_ADDR)
+ if ((rl->spec.flags & RTSA_REMOTE_HOST)
+ && rl->spec.remote_host != IPV4_INVALID_ADDR)
{
add_route3(rl->spec.remote_host,
IPV4_NETMASK_HOST,
@@ -1471,6 +1476,13 @@ setenv_route_ipv6(struct env_set *es, const struct route_ipv6 *r6, int i)
buf_printf( &name2, "route_ipv6_gateway_%d", i );
setenv_str( es, BSTR(&name2), print_in6_addr( r6->gateway, 0, &gc ));
+
+ if (r6->flags & RT_METRIC_DEFINED)
+ {
+ struct buffer name3 = alloc_buf_gc( 256, &gc );
+ buf_printf( &name3, "route_ipv6_metric_%d", i) ;
+ setenv_int( es, BSTR(&name3), r6->metric);
+ }
}
gc_free(&gc);
}
@@ -1979,25 +1991,24 @@ add_route_ipv6(struct route_ipv6 *r6, const struct tuntap *tt,
}
else
{
- struct buffer out = alloc_buf_gc(64, &gc);
+ DWORD adapter_index;
if (r6->adapter_index) /* vpn server special route */
{
- buf_printf(&out, "interface=%lu", r6->adapter_index );
+ adapter_index = r6->adapter_index;
gateway_needed = true;
}
else
{
- buf_printf(&out, "interface=%lu", tt->adapter_index );
+ adapter_index = tt->adapter_index;
}
- device = buf_bptr(&out);
- /* netsh interface ipv6 add route 2001:db8::/32 MyTunDevice */
- argv_printf(&argv, "%s%s interface ipv6 add route %s/%d %s",
+ /* netsh interface ipv6 add route 2001:db8::/32 42 */
+ argv_printf(&argv, "%s%s interface ipv6 add route %s/%d %lu",
get_win_sys_path(),
NETSH_PATH_SUFFIX,
network,
r6->netbits,
- device);
+ adapter_index);
/* next-hop depends on TUN or TAP mode:
* - in TAP mode, we use the "real" next-hop
@@ -2423,25 +2434,24 @@ delete_route_ipv6(const struct route_ipv6 *r6, const struct tuntap *tt,
}
else
{
- struct buffer out = alloc_buf_gc(64, &gc);
+ DWORD adapter_index;
if (r6->adapter_index) /* vpn server special route */
{
- buf_printf(&out, "interface=%lu", r6->adapter_index );
+ adapter_index = r6->adapter_index;
gateway_needed = true;
}
else
{
- buf_printf(&out, "interface=%lu", tt->adapter_index );
+ adapter_index = tt->adapter_index;
}
- device = buf_bptr(&out);
- /* netsh interface ipv6 delete route 2001:db8::/32 MyTunDevice */
- argv_printf(&argv, "%s%s interface ipv6 delete route %s/%d %s",
+ /* netsh interface ipv6 delete route 2001:db8::/32 42 */
+ argv_printf(&argv, "%s%s interface ipv6 delete route %s/%d %lu",
get_win_sys_path(),
NETSH_PATH_SUFFIX,
network,
r6->netbits,
- device);
+ adapter_index);
/* next-hop depends on TUN or TAP mode:
* - in TAP mode, we use the "real" next-hop
@@ -3408,11 +3418,15 @@ struct rtmsg {
/* the route socket code is identical for all 4 supported BSDs and for
* MacOS X (Darwin), with one crucial difference: when going from
- * 32 bit to 64 bit, the BSDs increased the structure size but kept
+ * 32 bit to 64 bit, FreeBSD/OpenBSD increased the structure size but kept
* source code compatibility by keeping the use of "long", while
* MacOS X decided to keep binary compatibility by *changing* the API
* to use "uint32_t", thus 32 bit on all OS X variants
*
+ * NetBSD does the MacOS way of "fixed number of bits, no matter if
+ * 32 or 64 bit OS", but chose uint64_t. For maximum portability, we
+ * just use the OS RT_ROUNDUP() macro, which is guaranteed to be correct.
+ *
* We used to have a large amount of duplicate code here which really
* differed only in this (long) vs. (uint32_t) - IMHO, worse than
* having a combined block for all BSDs with this single #ifdef inside
@@ -3421,6 +3435,8 @@ struct rtmsg {
#if defined(TARGET_DARWIN)
#define ROUNDUP(a) \
((a) > 0 ? (1 + (((a) - 1) | (sizeof(uint32_t) - 1))) : sizeof(uint32_t))
+#elif defined(TARGET_NETBSD)
+#define ROUNDUP(a) RT_ROUNDUP(a)
#else
#define ROUNDUP(a) \
((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
@@ -3729,7 +3745,7 @@ get_default_gateway_ipv6(struct route_ipv6_gateway_info *rgi6,
}
if (write(sockfd, (char *)&m_rtmsg, l) < 0)
{
- msg(M_WARN, "GDG6: problem writing to routing socket");
+ msg(M_WARN|M_ERRNO, "GDG6: problem writing to routing socket");
goto done;
}
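
The ROUNDUP() selection above reflects how PF_ROUTE replies align each embedded sockaddr: Darwin rounds to 4-byte units, NetBSD exports its own RT_ROUNDUP(), and the remaining BSDs round to sizeof(long). A sketch of the generic long-based variant and how it is typically used to step over one sockaddr in the reply buffer (BSD/Darwin only, since it relies on sa_len):

#include <sys/types.h>
#include <sys/socket.h>

/* Generic BSD variant: round a sockaddr length up to the next multiple
 * of sizeof(long); a zero length still consumes one alignment slot. */
#define ROUNDUP(a) \
    ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

/* Advance past one sockaddr inside a routing-socket message. */
static char *
next_sockaddr(char *cp)
{
    const struct sockaddr *sa = (const struct sockaddr *)cp;
    return cp + ROUNDUP(sa->sa_len);
}
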
diff --git a/src/openvpn/socket.c b/src/openvpn/socket.c
index c486327..9775068 100644
--- a/src/openvpn/socket.c
+++ b/src/openvpn/socket.c
@@ -378,7 +378,8 @@ do_preresolve(struct context *c)
/* HTTP remote hostname does not need to be resolved */
if (!ce->http_proxy_options)
{
- status = do_preresolve_host(c, remote, ce->remote_port, ce->af, flags);
+ status = do_preresolve_host(c, remote, ce->remote_port,
+ ce->af, flags);
if (status != 0)
{
goto err;
@@ -417,7 +418,8 @@ do_preresolve(struct context *c)
{
flags |= GETADDR_PASSIVE;
flags &= ~GETADDR_RANDOMIZE;
- status = do_preresolve_host(c, ce->local, ce->local_port, ce->af, flags);
+ status = do_preresolve_host(c, ce->local, ce->local_port,
+ ce->af, flags);
if (status != 0)
{
goto err;
@@ -526,7 +528,9 @@ openvpn_getaddrinfo(unsigned int flags,
if ((flags & GETADDR_MENTION_RESOLVE_RETRY)
&& !resolve_retry_seconds)
{
- fmt = "RESOLVE: Cannot resolve host address: %s:%s (%s) (I would have retried this name query if you had specified the --resolv-retry option.)";
+ fmt = "RESOLVE: Cannot resolve host address: %s:%s (%s) "
+ "(I would have retried this name query if you had "
+ "specified the --resolv-retry option.)";
}
if (!(flags & GETADDR_RESOLVE) || status == EAI_FAIL)
@@ -558,11 +562,13 @@ openvpn_getaddrinfo(unsigned int flags,
while (true)
{
#ifndef _WIN32
+ /* force resolv.conf reload */
res_init();
#endif
/* try hostname lookup */
hints.ai_flags &= ~AI_NUMERICHOST;
- dmsg(D_SOCKET_DEBUG, "GETADDRINFO flags=0x%04x ai_family=%d ai_socktype=%d",
+ dmsg(D_SOCKET_DEBUG,
+ "GETADDRINFO flags=0x%04x ai_family=%d ai_socktype=%d",
flags, hints.ai_family, hints.ai_socktype);
status = getaddrinfo(hostname, servname, &hints, res);
@@ -573,7 +579,9 @@ openvpn_getaddrinfo(unsigned int flags,
{
if (*signal_received == SIGUSR1) /* ignore SIGUSR1 */
{
- msg(level, "RESOLVE: Ignored SIGUSR1 signal received during DNS resolution attempt");
+ msg(level,
+ "RESOLVE: Ignored SIGUSR1 signal received during "
+ "DNS resolution attempt");
*signal_received = 0;
}
else
@@ -634,7 +642,9 @@ openvpn_getaddrinfo(unsigned int flags,
/* IP address parse succeeded */
if (flags & GETADDR_RANDOMIZE)
{
- msg(M_WARN, "WARNING: ignoring --remote-random-hostname because the hostname is an IP address");
+ msg(M_WARN,
+ "WARNING: ignoring --remote-random-hostname because the "
+ "hostname is an IP address");
}
}
@@ -1141,8 +1151,8 @@ create_socket(struct link_socket *sock, struct addrinfo *addr)
#if defined(TARGET_LINUX)
if (sock->bind_dev)
{
- msg (M_INFO, "Using bind-dev %s", sock->bind_dev);
- if (setsockopt (sock->sd, SOL_SOCKET, SO_BINDTODEVICE, sock->bind_dev, strlen (sock->bind_dev) + 1) != 0)
+ msg(M_INFO, "Using bind-dev %s", sock->bind_dev);
+ if (setsockopt(sock->sd, SOL_SOCKET, SO_BINDTODEVICE, sock->bind_dev, strlen(sock->bind_dev) + 1) != 0)
{
msg(M_WARN|M_ERRNO, "WARN: setsockopt SO_BINDTODEVICE=%s failed", sock->bind_dev);
}
@@ -1470,14 +1480,14 @@ openvpn_connect(socket_descriptor_t sd,
struct pollfd fds[1];
fds[0].fd = sd;
fds[0].events = POLLOUT;
- status = poll(fds, 1, 0);
+ status = poll(fds, 1, (connect_timeout > 0) ? 1000 : 0);
#else
fd_set writes;
struct timeval tv;
FD_ZERO(&writes);
openvpn_fd_set(sd, &writes);
- tv.tv_sec = 0;
+ tv.tv_sec = (connect_timeout > 0) ? 1 : 0;
tv.tv_usec = 0;
status = select(sd + 1, NULL, &writes, NULL, &tv);
@@ -1507,7 +1517,7 @@ openvpn_connect(socket_descriptor_t sd,
#endif
break;
}
- management_sleep(1);
+ management_sleep(0);
continue;
}
@@ -1802,7 +1812,8 @@ resolve_remote(struct link_socket *sock,
sock->info.lsa->remote_list = ai;
sock->info.lsa->current_remote = ai;
- dmsg(D_SOCKET_DEBUG, "RESOLVE_REMOTE flags=0x%04x phase=%d rrs=%d sig=%d status=%d",
+ dmsg(D_SOCKET_DEBUG,
+ "RESOLVE_REMOTE flags=0x%04x phase=%d rrs=%d sig=%d status=%d",
flags,
phase,
retry,
@@ -2030,8 +2041,14 @@ phase2_inetd(struct link_socket *sock, const struct frame *frame,
}
else
{
- msg(M_WARN, "inetd(%s): getsockname(%d) failed, using AF_INET",
+ int saved_errno = errno;
+ msg(M_WARN|M_ERRNO, "inetd(%s): getsockname(%d) failed, using AF_INET",
proto2ascii(sock->info.proto, sock->info.af, false), (int)sock->sd);
+ /* if not called with a socket on stdin, --inetd cannot work */
+ if (saved_errno == ENOTSOCK)
+ {
+ msg(M_FATAL, "ERROR: socket required for --inetd operation");
+ }
}
}
#else /* ifdef HAVE_GETSOCKNAME */
@@ -2047,7 +2064,6 @@ phase2_inetd(struct link_socket *sock, const struct frame *frame,
false,
sock->inetd == INETD_NOWAIT,
signal_received);
-
}
ASSERT(!remote_changed);
}
@@ -3150,22 +3166,22 @@ struct proto_names {
/* Indexed by PROTO_x */
static const struct proto_names proto_names[] = {
- {"proto-uninitialized", "proto-NONE", AF_UNSPEC, PROTO_NONE},
+ {"proto-uninitialized", "proto-NONE", AF_UNSPEC, PROTO_NONE},
/* try IPv4 and IPv6 (client), bind dual-stack (server) */
- {"udp", "UDP", AF_UNSPEC, PROTO_UDP},
- {"tcp-server", "TCP_SERVER", AF_UNSPEC, PROTO_TCP_SERVER},
- {"tcp-client", "TCP_CLIENT", AF_UNSPEC, PROTO_TCP_CLIENT},
- {"tcp", "TCP", AF_UNSPEC, PROTO_TCP},
+ {"udp", "UDP", AF_UNSPEC, PROTO_UDP},
+ {"tcp-server", "TCP_SERVER", AF_UNSPEC, PROTO_TCP_SERVER},
+ {"tcp-client", "TCP_CLIENT", AF_UNSPEC, PROTO_TCP_CLIENT},
+ {"tcp", "TCP", AF_UNSPEC, PROTO_TCP},
/* force IPv4 */
- {"udp4", "UDPv4", AF_INET, PROTO_UDP},
- {"tcp4-server","TCPv4_SERVER", AF_INET, PROTO_TCP_SERVER},
- {"tcp4-client","TCPv4_CLIENT", AF_INET, PROTO_TCP_CLIENT},
- {"tcp4", "TCPv4", AF_INET, PROTO_TCP},
+ {"udp4", "UDPv4", AF_INET, PROTO_UDP},
+ {"tcp4-server", "TCPv4_SERVER", AF_INET, PROTO_TCP_SERVER},
+ {"tcp4-client", "TCPv4_CLIENT", AF_INET, PROTO_TCP_CLIENT},
+ {"tcp4", "TCPv4", AF_INET, PROTO_TCP},
/* force IPv6 */
- {"udp6","UDPv6", AF_INET6, PROTO_UDP},
- {"tcp6-server","TCPv6_SERVER", AF_INET6, PROTO_TCP_SERVER},
- {"tcp6-client","TCPv6_CLIENT", AF_INET6, PROTO_TCP_CLIENT},
- {"tcp6","TCPv6", AF_INET6, PROTO_TCP},
+ {"udp6", "UDPv6", AF_INET6, PROTO_UDP},
+ {"tcp6-server", "TCPv6_SERVER", AF_INET6, PROTO_TCP_SERVER},
+ {"tcp6-client", "TCPv6_CLIENT", AF_INET6, PROTO_TCP_CLIENT},
+ {"tcp6", "TCPv6", AF_INET6, PROTO_TCP},
};
bool
@@ -3177,6 +3193,7 @@ proto_is_net(int proto)
}
return proto != PROTO_NONE;
}
+
bool
proto_is_dgram(int proto)
{
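
The openvpn_connect() hunk above stops busy-waiting: when a connect timeout is in effect, poll()/select() now waits up to one second per iteration and the explicit management_sleep(1) becomes management_sleep(0), so each loop turn spends at most one second blocked in the kernel instead of a fixed sleep. A self-contained sketch of that pattern for a non-blocking connect (illustrative only, error handling trimmed):

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/* Wait up to 'timeout' seconds for a non-blocking connect() to
 * complete, polling in one-second slices so the caller could service
 * other work between slices. Returns 0 on success, otherwise the
 * socket error or ETIMEDOUT. */
static int
wait_for_connect(int sd, int timeout)
{
    while (timeout > 0)
    {
        struct pollfd pfd = { .fd = sd, .events = POLLOUT };
        int rv = poll(&pfd, 1, 1000);   /* one-second slice */

        if (rv > 0)
        {
            int err = 0;
            socklen_t len = sizeof(err);
            if (getsockopt(sd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
            {
                return errno;
            }
            return err;                 /* 0 means connected */
        }
        if (rv < 0 && errno != EINTR)
        {
            return errno;
        }
        timeout--;
    }
    return ETIMEDOUT;
}
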
diff --git a/src/openvpn/socks.c b/src/openvpn/socks.c
index 57f0cee..36df747 100644
--- a/src/openvpn/socks.c
+++ b/src/openvpn/socks.c
@@ -312,7 +312,7 @@ recv_socks_reply(socket_descriptor_t sd,
char atyp = '\0';
int alen = 0;
int len = 0;
- char buf[22];
+ char buf[270]; /* 4 + alen(max 256) + 2 */
const int timeout_sec = 5;
if (addr != NULL)
@@ -381,7 +381,10 @@ recv_socks_reply(socket_descriptor_t sd,
break;
case '\x03': /* DOMAINNAME */
- alen = (unsigned char) c;
+ /* RFC 1928, section 5: 1 byte length, <n> bytes name,
+ * so the total "address length" is (length+1)
+ */
+ alen = (unsigned char) c + 1;
break;
case '\x04': /* IP V6 */
@@ -451,7 +454,7 @@ establish_socks_proxy_passthru(struct socks_proxy_info *p,
const char *servname, /* openvpn server port */
volatile int *signal_received)
{
- char buf[128];
+ char buf[270];
size_t len;
if (!socks_handshake(p, sd, signal_received))
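
The socks.c change sizes the reply buffer for the worst case noted in its comment (4 header bytes + an address field of at most 256 bytes + 2 port bytes) and fixes the DOMAINNAME case: per RFC 1928 the reply carries one length octet followed by that many name bytes, so the address field consumes length + 1 bytes. A sketch of that per-ATYP length computation (assumed helper, not the OpenVPN parser):

/* Bytes of address field that follow the ATYP octet in a SOCKS5 reply
 * (RFC 1928, section 5). 'len_octet' is the first byte after ATYP,
 * which for DOMAINNAME is the name length. Returns -1 for an unknown
 * address type. */
static int
socks5_addr_len(unsigned char atyp, unsigned char len_octet)
{
    switch (atyp)
    {
        case 0x01: return 4;              /* IP V4 */
        case 0x03: return len_octet + 1;  /* length octet + name */
        case 0x04: return 16;             /* IP V6 */
        default:   return -1;
    }
}
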
diff --git a/src/openvpn/ssl.c b/src/openvpn/ssl.c
index f16114c..c6ba812 100644
--- a/src/openvpn/ssl.c
+++ b/src/openvpn/ssl.c
@@ -2484,6 +2484,14 @@ key_method_2_read(struct buffer *buf, struct tls_multi *multi, struct tls_sessio
multi->remote_ciphername =
options_string_extract_option(options, "cipher", NULL);
+ /* In OCC we send '[null-cipher]' instead of 'none' */
+ if (multi->remote_ciphername
+ && strcmp(multi->remote_ciphername, "[null-cipher]") == 0)
+ {
+ free(multi->remote_ciphername);
+ multi->remote_ciphername = string_alloc("none", NULL);
+ }
+
if (tls_session_user_pass_enabled(session))
{
/* Perform username/password authentication */
diff --git a/src/openvpn/ssl_ncp.c b/src/openvpn/ssl_ncp.c
index 5549639..45bddbe 100644
--- a/src/openvpn/ssl_ncp.c
+++ b/src/openvpn/ssl_ncp.c
@@ -110,7 +110,15 @@ mutate_ncp_cipher_list(const char *list, struct gc_arena *gc)
* e.g. replacing AeS-128-gCm with AES-128-GCM
*/
const cipher_kt_t *ktc = cipher_kt_get(token);
- if (!ktc)
+ if (strcmp(token, "none") == 0)
+ {
+ msg(M_WARN, "WARNING: cipher 'none' specified for --data-ciphers. "
+ "This allows negotiation of NO encryption and "
+ "tunnelled data WILL then be transmitted in clear text "
+ "over the network! "
+ "PLEASE DO RECONSIDER THIS SETTING!");
+ }
+ if (!ktc && strcmp(token, "none") != 0)
{
msg(M_WARN, "Unsupported cipher in --data-ciphers: %s", token);
error_found = true;
@@ -118,6 +126,12 @@ mutate_ncp_cipher_list(const char *list, struct gc_arena *gc)
else
{
const char *ovpn_cipher_name = cipher_kt_name(ktc);
+ if (ktc == NULL)
+ {
+ /* NULL resolves to [null-cipher] but we need none for
+ * data-ciphers */
+ ovpn_cipher_name = "none";
+ }
if (buf_len(&new_list)> 0)
{
@@ -325,4 +339,4 @@ check_pull_client_ncp(struct context *c, const int found)
"to this server.");
return false;
}
-}
\ No newline at end of file
+}
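
mutate_ncp_cipher_list() above gives the literal token "none" special treatment: cipher_kt_get() has no entry for it, so instead of being rejected (or normalized through cipher_kt_name(), which would yield "[null-cipher]") it is passed through verbatim with a loud warning that negotiating no encryption becomes possible. A small sketch of that decision for a colon-separated list, with a hypothetical lookup callback standing in for cipher_kt_get()/cipher_kt_name():

#include <stdio.h>
#include <string.h>

/* Hypothetical normalizer: 'lookup' maps a cipher token to its
 * canonical name or returns NULL for unknown tokens. "none" is
 * accepted as-is and never sent through the lookup. 'list' must be
 * writable because strtok() modifies it. */
static int
normalize_cipher_list(char *list, const char *(*lookup)(const char *))
{
    int ok = 1;
    for (char *tok = strtok(list, ":"); tok; tok = strtok(NULL, ":"))
    {
        if (strcmp(tok, "none") == 0)
        {
            printf("%s (no encryption!)\n", tok);
            continue;
        }
        const char *canon = lookup(tok);
        if (!canon)
        {
            fprintf(stderr, "unsupported cipher: %s\n", tok);
            ok = 0;
            continue;
        }
        printf("%s\n", canon);
    }
    return ok;
}
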
diff --git a/src/openvpn/ssl_verify.c b/src/openvpn/ssl_verify.c
index 97ccb93..33115eb 100644
--- a/src/openvpn/ssl_verify.c
+++ b/src/openvpn/ssl_verify.c
@@ -1068,69 +1068,51 @@ verify_user_pass_script(struct tls_session *session, struct tls_multi *multi,
const char *tmp_file = "";
bool ret = false;
- /* Is username defined? */
- if ((session->opt->ssl_flags & SSLF_AUTH_USER_PASS_OPTIONAL) || strlen(up->username))
+ /* Set environmental variables prior to calling script */
+ setenv_str(session->opt->es, "script_type", "user-pass-verify");
+
+ /* format command line */
+ argv_parse_cmd(&argv, session->opt->auth_user_pass_verify_script);
+
+ if (session->opt->auth_user_pass_verify_script_via_file)
{
- /* Set environmental variables prior to calling script */
- setenv_str(session->opt->es, "script_type", "user-pass-verify");
+ struct status_output *so;
- if (session->opt->auth_user_pass_verify_script_via_file)
+ tmp_file = platform_create_temp_file(session->opt->tmp_dir, "up",
+ &gc);
+ if (tmp_file)
{
- struct status_output *so;
-
- tmp_file = platform_create_temp_file(session->opt->tmp_dir, "up",
- &gc);
- if (tmp_file)
- {
- so = status_open(tmp_file, 0, -1, NULL, STATUS_OUTPUT_WRITE);
- status_printf(so, "%s", up->username);
- status_printf(so, "%s", up->password);
- if (!status_close(so))
- {
- msg(D_TLS_ERRORS, "TLS Auth Error: could not write username/password to file: %s",
- tmp_file);
- goto done;
- }
- }
- else
+ so = status_open(tmp_file, 0, -1, NULL, STATUS_OUTPUT_WRITE);
+ status_printf(so, "%s", up->username);
+ status_printf(so, "%s", up->password);
+ if (!status_close(so))
{
- msg(D_TLS_ERRORS, "TLS Auth Error: could not create write "
- "username/password to temp file");
+ msg(D_TLS_ERRORS, "TLS Auth Error: could not write username/password to file: %s",
+ tmp_file);
+ goto done;
}
+ /* pass temp file name to script */
+ argv_printf_cat(&argv, "%s", tmp_file);
}
else
{
- setenv_str(session->opt->es, "username", up->username);
- setenv_str(session->opt->es, "password", up->password);
- }
-
- /* setenv incoming cert common name for script */
- setenv_str(session->opt->es, "common_name", session->common_name);
-
- /* setenv client real IP address */
- setenv_untrusted(session);
-
- /* add auth-token environment */
- add_session_token_env(session, multi, up);
-
- /* format command line */
- argv_parse_cmd(&argv, session->opt->auth_user_pass_verify_script);
- argv_printf_cat(&argv, "%s", tmp_file);
-
- /* call command */
- ret = openvpn_run_script(&argv, session->opt->es, 0,
- "--auth-user-pass-verify");
-
- if (!session->opt->auth_user_pass_verify_script_via_file)
- {
- setenv_del(session->opt->es, "password");
+ msg(D_TLS_ERRORS, "TLS Auth Error: could not create write "
+ "username/password to temp file");
}
}
else
{
- msg(D_TLS_ERRORS, "TLS Auth Error: peer provided a blank username");
+ setenv_str(session->opt->es, "password", up->password);
}
+ /* call command */
+ ret = openvpn_run_script(&argv, session->opt->es, 0,
+ "--auth-user-pass-verify");
+
+ if (!session->opt->auth_user_pass_verify_script_via_file)
+ {
+ setenv_del(session->opt->es, "password");
+ }
done:
if (tmp_file && strlen(tmp_file) > 0)
{
@@ -1154,48 +1136,31 @@ verify_user_pass_plugin(struct tls_session *session, struct tls_multi *multi,
struct key_state *ks = &session->key[KS_PRIMARY]; /* primary key */
#endif
- /* Is username defined? */
- if ((session->opt->ssl_flags & SSLF_AUTH_USER_PASS_OPTIONAL) || strlen(up->username))
- {
- /* set username/password in private env space */
- setenv_str(session->opt->es, "username", up->username);
- setenv_str(session->opt->es, "password", up->password);
-
- /* setenv incoming cert common name for script */
- setenv_str(session->opt->es, "common_name", session->common_name);
+ /* set password in private env space */
+ setenv_str(session->opt->es, "password", up->password);
- /* setenv client real IP address */
- setenv_untrusted(session);
-
- /* add auth-token environment */
- add_session_token_env(session, multi, up);
#ifdef PLUGIN_DEF_AUTH
- /* generate filename for deferred auth control file */
- if (!key_state_gen_auth_control_file(ks, session->opt))
- {
- msg(D_TLS_ERRORS, "TLS Auth Error (%s): "
- "could not create deferred auth control file", __func__);
- return retval;
- }
+ /* generate filename for deferred auth control file */
+ if (!key_state_gen_auth_control_file(ks, session->opt))
+ {
+ msg(D_TLS_ERRORS, "TLS Auth Error (%s): "
+ "could not create deferred auth control file", __func__);
+ return retval;
+ }
#endif
- /* call command */
- retval = plugin_call(session->opt->plugins, OPENVPN_PLUGIN_AUTH_USER_PASS_VERIFY, NULL, NULL, session->opt->es);
+ /* call command */
+ retval = plugin_call(session->opt->plugins, OPENVPN_PLUGIN_AUTH_USER_PASS_VERIFY, NULL, NULL, session->opt->es);
#ifdef PLUGIN_DEF_AUTH
- /* purge auth control filename (and file itself) for non-deferred returns */
- if (retval != OPENVPN_PLUGIN_FUNC_DEFERRED)
- {
- key_state_rm_auth_control_file(ks);
- }
-#endif
-
- setenv_del(session->opt->es, "password");
- }
- else
+ /* purge auth control filename (and file itself) for non-deferred returns */
+ if (retval != OPENVPN_PLUGIN_FUNC_DEFERRED)
{
- msg(D_TLS_ERRORS, "TLS Auth Error (verify_user_pass_plugin): peer provided a blank username");
+ key_state_rm_auth_control_file(ks);
}
+#endif
+
+ setenv_del(session->opt->es, "password");
return retval;
}
@@ -1218,12 +1183,30 @@ verify_user_pass_management(struct tls_session *session,
int retval = KMDA_ERROR;
struct key_state *ks = &session->key[KS_PRIMARY]; /* primary key */
+ /* set username/password in private env space */
+ setenv_str(session->opt->es, "password", up->password);
+
+ if (management)
+ {
+ management_notify_client_needing_auth(management, ks->mda_key_id, session->opt->mda_context, session->opt->es);
+ }
+
+ setenv_del(session->opt->es, "password");
+
+ retval = KMDA_SUCCESS;
+
+ return retval;
+}
+#endif /* ifdef MANAGEMENT_DEF_AUTH */
+
+static bool
+set_verify_user_pass_env(struct user_pass *up, struct tls_multi *multi,
+ struct tls_session *session)
+{
/* Is username defined? */
if ((session->opt->ssl_flags & SSLF_AUTH_USER_PASS_OPTIONAL) || strlen(up->username))
{
- /* set username/password in private env space */
setenv_str(session->opt->es, "username", up->username);
- setenv_str(session->opt->es, "password", up->password);
/* setenv incoming cert common name for script */
setenv_str(session->opt->es, "common_name", session->common_name);
@@ -1236,24 +1219,14 @@ verify_user_pass_management(struct tls_session *session,
* allow the management to figure out if it is a new session or a continued one
*/
add_session_token_env(session, multi, up);
- if (management)
- {
- management_notify_client_needing_auth(management, ks->mda_key_id, session->opt->mda_context, session->opt->es);
- }
-
- setenv_del(session->opt->es, "password");
-
- retval = KMDA_SUCCESS;
+ return true;
}
else
{
- msg(D_TLS_ERRORS, "TLS Auth Error (verify_user_pass_management): peer provided a blank username");
+ msg(D_TLS_ERRORS, "TLS Auth Error: peer provided a blank username");
+ return false;
}
-
- return retval;
}
-#endif /* ifdef MANAGEMENT_DEF_AUTH */
-
/*
* Main username/password verification entry point
@@ -1325,6 +1298,14 @@ verify_user_pass(struct user_pass *up, struct tls_multi *multi,
return;
}
}
+
+ /* Set the environment variables used by all auth variants */
+ if (!set_verify_user_pass_env(up, multi, session))
+ {
+ skip_auth = true;
+ s1 = OPENVPN_PLUGIN_FUNC_ERROR;
+ }
+
/* call plugin(s) and/or script */
if (!skip_auth)
{
diff --git a/src/openvpn/tun.c b/src/openvpn/tun.c
index 923131a..8315a42 100644
--- a/src/openvpn/tun.c
+++ b/src/openvpn/tun.c
@@ -68,7 +68,7 @@ const static GUID GUID_DEVINTERFACE_NET = { 0xcac88484, 0x7515, 0x4c03, { 0x82,
#define NI_OPTIONS (1<<2)
static void netsh_ifconfig(const struct tuntap_options *to,
- const char *flex_name,
+ DWORD adapter_index,
const in_addr_t ip,
const in_addr_t netmask,
const unsigned int flags);
@@ -79,7 +79,7 @@ static void windows_set_mtu(const int iface_index,
static void netsh_set_dns6_servers(const struct in6_addr *addr_list,
const int addr_len,
- const char *flex_name);
+ DWORD adapter_index);
static void netsh_command(const struct argv *a, int n, int msglevel);
@@ -115,11 +115,17 @@ do_address_service(const bool add, const short family, const struct tuntap *tt)
{
addr.address.ipv4.s_addr = htonl(tt->local);
addr.prefix_len = netmask_to_netbits2(tt->adapter_netmask);
+ msg(D_IFCONFIG, "INET address service: %s %s/%d",
+ add ? "add" : "remove",
+ print_in_addr_t(tt->local, 0, &gc), addr.prefix_len);
}
else
{
addr.address.ipv6 = tt->local_ipv6;
- addr.prefix_len = tt->netbits_ipv6;
+ addr.prefix_len = (tt->type == DEV_TYPE_TUN) ? 128 : tt->netbits_ipv6;
+ msg(D_IFCONFIG, "INET6 address service: %s %s/%d",
+ add ? "add" : "remove",
+ print_in6_addr(tt->local_ipv6, 0, &gc), addr.prefix_len);
}
if (!send_msg_iservice(pipe, &addr, sizeof(addr), &ack, "TUN"))
@@ -143,6 +149,61 @@ out:
}
static bool
+do_dns_domain_service(bool add, const struct tuntap *tt)
+{
+ bool ret = false;
+ ack_message_t ack;
+ struct gc_arena gc = gc_new();
+ HANDLE pipe = tt->options.msg_channel;
+
+ if (!tt->options.domain) /* no domain to add or delete */
+ {
+ return true;
+ }
+
+ /* Use dns_cfg_msg with addr_len = 0 for setting only the DOMAIN */
+ dns_cfg_message_t dns = {
+ .header = {
+ (add ? msg_add_dns_cfg : msg_del_dns_cfg),
+ sizeof(dns_cfg_message_t),
+ 0
+ },
+ .iface = { .index = tt->adapter_index, .name = "" },
+ .domains = "", /* set below */
+ .family = AF_INET, /* unused */
+ .addr_len = 0 /* add/delete only the domain, not DNS servers */
+ };
+
+ strncpynt(dns.iface.name, tt->actual_name, sizeof(dns.iface.name));
+ strncpynt(dns.domains, tt->options.domain, sizeof(dns.domains));
+ /* truncation of domain name is not checked as it can't happen
+ * with 512 bytes room in dns.domains.
+ */
+
+ msg(D_LOW, "%s dns domain on '%s' (if_index = %d) using service",
+ (add ? "Setting" : "Deleting"), dns.iface.name, dns.iface.index);
+ if (!send_msg_iservice(pipe, &dns, sizeof(dns), &ack, "TUN"))
+ {
+ goto out;
+ }
+
+ if (ack.error_number != NO_ERROR)
+ {
+ msg(M_WARN, "TUN: %s dns domain failed using service: %s [status=%u if_name=%s]",
+ (add ? "adding" : "deleting"), strerror_win32(ack.error_number, &gc),
+ ack.error_number, dns.iface.name);
+ goto out;
+ }
+
+ msg(M_INFO, "DNS domain %s using service", (add ? "set" : "deleted"));
+ ret = true;
+
+out:
+ gc_free(&gc);
+ return ret;
+}
+
+static bool
do_dns_service(bool add, const short family, const struct tuntap *tt)
{
bool ret = false;
@@ -158,6 +219,7 @@ do_dns_service(bool add, const short family, const struct tuntap *tt)
return true;
}
+ /* Use dns_cfg_msg with domain = "" for setting only the DNS servers */
dns_cfg_message_t dns = {
.header = {
(add ? msg_add_dns_cfg : msg_del_dns_cfg),
@@ -1088,26 +1150,40 @@ do_ifconfig_ipv6(struct tuntap *tt, const char *ifname, int tun_mtu,
else if (tt->options.msg_channel)
{
do_address_service(true, AF_INET6, tt);
- add_route_connected_v6_net(tt, es);
+ if (tt->type == DEV_TYPE_TUN)
+ {
+ add_route_connected_v6_net(tt, es);
+ }
do_dns_service(true, AF_INET6, tt);
do_set_mtu_service(tt, AF_INET6, tun_mtu);
+ /* If IPv4 is not enabled, set DNS domain here */
+ if (!tt->did_ifconfig_setup)
+ {
+ do_dns_domain_service(true, tt);
+ }
}
else
{
- /* example: netsh interface ipv6 set address interface=42
- * 2001:608:8003::d store=active
+ /* example: netsh interface ipv6 set address 42
+ * 2001:608:8003::d/bits store=active
+ */
+
+ /* in TUN mode, we only simulate a subnet, so the interface
+ * is configured with /128 + a route to fe80::8. In TAP mode,
+ * the correct netbits must be set, and no on-link route
*/
- char iface[64];
+ int netbits = (tt->type == DEV_TYPE_TUN) ? 128 : tt->netbits_ipv6;
- openvpn_snprintf(iface, sizeof(iface), "interface=%lu",
- tt->adapter_index);
- argv_printf(&argv, "%s%s interface ipv6 set address %s %s store=active",
- get_win_sys_path(), NETSH_PATH_SUFFIX, iface,
- ifconfig_ipv6_local);
+ argv_printf(&argv, "%s%s interface ipv6 set address %lu %s/%d store=active",
+ get_win_sys_path(), NETSH_PATH_SUFFIX, tt->adapter_index,
+ ifconfig_ipv6_local, netbits);
netsh_command(&argv, 4, M_FATAL);
- add_route_connected_v6_net(tt, es);
+ if (tt->type == DEV_TYPE_TUN)
+ {
+ add_route_connected_v6_net(tt, es);
+ }
/* set ipv6 dns servers if any are specified */
- netsh_set_dns6_servers(tt->options.dns6, tt->options.dns6_len, ifname);
+ netsh_set_dns6_servers(tt->options.dns6, tt->options.dns6_len, tt->adapter_index);
windows_set_mtu(tt->adapter_index, AF_INET6, tun_mtu);
}
#else /* platforms we have no IPv6 code for */
@@ -1224,7 +1300,7 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
argv_printf(&argv, "%s %s netmask 255.255.255.255", IFCONFIG_PATH,
ifname);
}
- else if (tt->topology == TOP_SUBNET)
+ else if (tt->type == DEV_TYPE_TUN && tt->topology == TOP_SUBNET)
{
argv_printf(&argv, "%s %s %s %s netmask %s mtu %d up", IFCONFIG_PATH,
ifname, ifconfig_local, ifconfig_local,
@@ -1243,7 +1319,7 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
solaris_error_close(tt, es, ifname, false);
}
- if (!tun && tt->topology == TOP_SUBNET)
+ if (!tun && tt->type == DEV_TYPE_TUN && tt->topology == TOP_SUBNET)
{
/* Add a network route for the local tun interface */
struct route_ipv4 r;
@@ -1274,7 +1350,7 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
IFCONFIG_PATH, ifname, ifconfig_local,
ifconfig_remote_netmask, tun_mtu);
}
- else if (tt->topology == TOP_SUBNET)
+ else if (tt->type == DEV_TYPE_TUN && tt->topology == TOP_SUBNET)
{
remote_end = create_arbitrary_remote( tt );
argv_printf(&argv, "%s %s %s %s mtu %d netmask %s up -link0",
@@ -1292,7 +1368,7 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
openvpn_execve_check(&argv, es, S_FATAL, "OpenBSD ifconfig failed");
/* Add a network route for the local tun interface */
- if (!tun && tt->topology == TOP_SUBNET)
+ if (!tun && tt->type == DEV_TYPE_TUN && tt->topology == TOP_SUBNET)
{
struct route_ipv4 r;
CLEAR(r);
@@ -1312,7 +1388,7 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
IFCONFIG_PATH, ifname, ifconfig_local,
ifconfig_remote_netmask, tun_mtu);
}
- else if (tt->topology == TOP_SUBNET)
+ else if (tt->type == DEV_TYPE_TUN && tt->topology == TOP_SUBNET)
{
remote_end = create_arbitrary_remote(tt);
argv_printf(&argv, "%s %s %s %s mtu %d netmask %s up", IFCONFIG_PATH,
@@ -1334,7 +1410,7 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
openvpn_execve_check(&argv, es, S_FATAL, "NetBSD ifconfig failed");
/* Add a network route for the local tun interface */
- if (!tun && tt->topology == TOP_SUBNET)
+ if (!tun && tt->type == DEV_TYPE_TUN && tt->topology == TOP_SUBNET)
{
struct route_ipv4 r;
CLEAR(r);
@@ -1366,7 +1442,7 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
}
else
{
- if (tt->topology == TOP_SUBNET)
+ if (tt->type == DEV_TYPE_TUN && tt->topology == TOP_SUBNET)
{
argv_printf(&argv, "%s %s %s %s netmask %s mtu %d up",
IFCONFIG_PATH, ifname, ifconfig_local, ifconfig_local,
@@ -1384,7 +1460,7 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
openvpn_execve_check(&argv, es, S_FATAL, "Mac OS X ifconfig failed");
/* Add a network route for the local tun interface */
- if (!tun && tt->topology == TOP_SUBNET)
+ if (!tun && tt->type == DEV_TYPE_TUN && tt->topology == TOP_SUBNET)
{
struct route_ipv4 r;
CLEAR(r);
@@ -1406,7 +1482,7 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
IFCONFIG_PATH, ifname, ifconfig_local,
ifconfig_remote_netmask, tun_mtu);
}
- else if (tt->topology == TOP_SUBNET)
+ else if (tt->type == DEV_TYPE_TUN && tt->topology == TOP_SUBNET)
{
remote_end = create_arbitrary_remote( tt );
argv_printf(&argv, "%s %s %s %s mtu %d netmask %s up", IFCONFIG_PATH,
@@ -1423,7 +1499,7 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
openvpn_execve_check(&argv, es, S_FATAL, "FreeBSD ifconfig failed");
/* Add a network route for the local tun interface */
- if (!tun && tt->topology == TOP_SUBNET)
+ if (!tun && tt->type == DEV_TYPE_TUN && tt->topology == TOP_SUBNET)
{
struct route_ipv4 r;
CLEAR(r);
@@ -1455,8 +1531,6 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
env_set_destroy(aix_es);
}
#elif defined (_WIN32)
- ASSERT(ifname != NULL);
-
if (tt->options.ip_win32_type == IPW32_SET_MANUAL)
{
msg(M_INFO,
@@ -1472,10 +1546,11 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu,
{
do_address_service(true, AF_INET, tt);
do_dns_service(true, AF_INET, tt);
+ do_dns_domain_service(true, tt);
}
else if (tt->options.ip_win32_type == IPW32_SET_NETSH)
{
- netsh_ifconfig(&tt->options, ifname, tt->local,
+ netsh_ifconfig(&tt->options, tt->adapter_index, tt->local,
tt->adapter_netmask, NI_IP_NETMASK|NI_OPTIONS);
}
if (tt->options.msg_channel)
@@ -1993,6 +2068,11 @@ open_tun(const char *dev, const char *dev_type, const char *dev_node, struct tun
#ifdef ENABLE_FEATURE_TUN_PERSIST
+/* TUNSETGROUP appeared in 2.6.23 */
+#ifndef TUNSETGROUP
+# define TUNSETGROUP _IOW('T', 206, int)
+#endif
+
void
tuncfg(const char *dev, const char *dev_type, const char *dev_node,
int persist_mode, const char *username, const char *groupname,
@@ -2032,7 +2112,7 @@ tuncfg(const char *dev, const char *dev_type, const char *dev_node,
}
else if (ioctl(tt->fd, TUNSETGROUP, platform_state_group.gr->gr_gid) < 0)
{
- msg(M_ERR, "Cannot ioctl TUNSETOWNER(%s) %s", groupname, dev);
+ msg(M_ERR, "Cannot ioctl TUNSETGROUP(%s) %s", groupname, dev);
}
}
close_tun(tt, ctx);
@@ -5263,23 +5343,29 @@ ip_addr_member_of(const in_addr_t addr, const IP_ADDR_STRING *ias)
* Set the ipv6 dns servers on the specified interface.
* The list of dns servers currently set on the interface
* are cleared first.
- * No action is taken if number of addresses (addr_len) < 1.
*/
static void
netsh_set_dns6_servers(const struct in6_addr *addr_list,
const int addr_len,
- const char *flex_name)
+ DWORD adapter_index)
{
struct gc_arena gc = gc_new();
struct argv argv = argv_new();
+ /* delete existing DNS settings from TAP interface */
+ argv_printf(&argv, "%s%s interface ipv6 delete dns %lu all",
+ get_win_sys_path(),
+ NETSH_PATH_SUFFIX,
+ adapter_index);
+ netsh_command(&argv, 2, M_FATAL);
+
for (int i = 0; i < addr_len; ++i)
{
const char *fmt = (i == 0) ?
- "%s%s interface ipv6 set dns %s static %s"
- : "%s%s interface ipv6 add dns %s %s";
+ "%s%s interface ipv6 set dns %lu static %s"
+ : "%s%s interface ipv6 add dns %lu %s";
argv_printf(&argv, fmt, get_win_sys_path(),
- NETSH_PATH_SUFFIX, flex_name,
+ NETSH_PATH_SUFFIX, adapter_index,
print_in6_addr(addr_list[i], 0, &gc));
/* disable slow address validation on Windows 7 and higher */
@@ -5301,7 +5387,7 @@ netsh_ifconfig_options(const char *type,
const in_addr_t *addr_list,
const int addr_len,
const IP_ADDR_STRING *current,
- const char *flex_name,
+ DWORD adapter_index,
const bool test_first)
{
struct gc_arena gc = gc_new();
@@ -5325,11 +5411,11 @@ netsh_ifconfig_options(const char *type,
/* delete existing DNS/WINS settings from TAP interface */
if (delete_first)
{
- argv_printf(&argv, "%s%s interface ip delete %s %s all",
+ argv_printf(&argv, "%s%s interface ip delete %s %lu all",
get_win_sys_path(),
NETSH_PATH_SUFFIX,
type,
- flex_name);
+ adapter_index);
netsh_command(&argv, 2, M_FATAL);
}
@@ -5342,14 +5428,14 @@ netsh_ifconfig_options(const char *type,
if (delete_first || !test_first || !ip_addr_member_of(addr_list[i], current))
{
const char *fmt = count ?
- "%s%s interface ip add %s %s %s"
- : "%s%s interface ip set %s %s static %s";
+ "%s%s interface ip add %s %lu %s"
+ : "%s%s interface ip set %s %lu static %s";
argv_printf(&argv, fmt,
get_win_sys_path(),
NETSH_PATH_SUFFIX,
type,
- flex_name,
+ adapter_index,
print_in_addr_t(addr_list[i], 0, &gc));
/* disable slow address validation on Windows 7 and higher */
@@ -5365,8 +5451,8 @@ netsh_ifconfig_options(const char *type,
}
else
{
- msg(M_INFO, "NETSH: \"%s\" %s %s [already set]",
- flex_name,
+ msg(M_INFO, "NETSH: %lu %s %s [already set]",
+ adapter_index,
type,
print_in_addr_t(addr_list[i], 0, &gc));
}
@@ -5397,7 +5483,7 @@ init_ip_addr_string2(IP_ADDR_STRING *dest, const IP_ADDR_STRING *src1, const IP_
static void
netsh_ifconfig(const struct tuntap_options *to,
- const char *flex_name,
+ DWORD adapter_index,
const in_addr_t ip,
const in_addr_t netmask,
const unsigned int flags)
@@ -5410,27 +5496,26 @@ netsh_ifconfig(const struct tuntap_options *to,
if (flags & NI_TEST_FIRST)
{
const IP_ADAPTER_INFO *list = get_adapter_info_list(&gc);
- const int index = get_adapter_index_flexible(flex_name);
- ai = get_adapter(list, index);
- pai = get_per_adapter_info(index, &gc);
+ ai = get_adapter(list, adapter_index);
+ pai = get_per_adapter_info(adapter_index, &gc);
}
if (flags & NI_IP_NETMASK)
{
if (test_adapter_ip_netmask(ai, ip, netmask))
{
- msg(M_INFO, "NETSH: \"%s\" %s/%s [already set]",
- flex_name,
+ msg(M_INFO, "NETSH: %lu %s/%s [already set]",
+ adapter_index,
print_in_addr_t(ip, 0, &gc),
print_in_addr_t(netmask, 0, &gc));
}
else
{
- /* example: netsh interface ip set address my-tap static 10.3.0.1 255.255.255.0 */
- argv_printf(&argv, "%s%s interface ip set address %s static %s %s",
+ /* example: netsh interface ip set address 42 static 10.3.0.1 255.255.255.0 */
+ argv_printf(&argv, "%s%s interface ip set address %lu static %s %s",
get_win_sys_path(),
NETSH_PATH_SUFFIX,
- flex_name,
+ adapter_index,
print_in_addr_t(ip, 0, &gc),
print_in_addr_t(netmask, 0, &gc));
@@ -5449,7 +5534,7 @@ netsh_ifconfig(const struct tuntap_options *to,
to->dns,
to->dns_len,
pai ? &pai->DnsServerList : NULL,
- flex_name,
+ adapter_index,
BOOL_CAST(flags & NI_TEST_FIRST));
if (ai && ai->HaveWins)
{
@@ -5460,7 +5545,7 @@ netsh_ifconfig(const struct tuntap_options *to,
to->wins,
to->wins_len,
ai ? wins : NULL,
- flex_name,
+ adapter_index,
BOOL_CAST(flags & NI_TEST_FIRST));
}
@@ -5469,16 +5554,16 @@ netsh_ifconfig(const struct tuntap_options *to,
}
static void
-netsh_enable_dhcp(const char *actual_name)
+netsh_enable_dhcp(DWORD adapter_index)
{
struct argv argv = argv_new();
- /* example: netsh interface ip set address my-tap dhcp */
+ /* example: netsh interface ip set address 42 dhcp */
argv_printf(&argv,
- "%s%s interface ip set address %s dhcp",
+ "%s%s interface ip set address %lu dhcp",
get_win_sys_path(),
NETSH_PATH_SUFFIX,
- actual_name);
+ adapter_index);
netsh_command(&argv, 4, M_FATAL);
@@ -5624,7 +5709,7 @@ tun_standby(struct tuntap *tt)
{
msg(M_INFO, "NOTE: now trying netsh (this may take some time)");
netsh_ifconfig(&tt->options,
- tt->actual_name,
+ tt->adapter_index,
tt->local,
tt->adapter_netmask,
NI_TEST_FIRST|NI_IP_NETMASK|NI_OPTIONS);
@@ -6486,7 +6571,7 @@ tun_open_device(struct tuntap *tt, const char *dev_node, const char **device_gui
if (!*device_guid)
{
- msg(M_FATAL, "All %s adapters on this system are currently in use.", print_windows_driver(tt->windows_driver));
+ msg(M_FATAL, "All %s adapters on this system are currently in use or disabled.", print_windows_driver(tt->windows_driver));
}
if (tt->windows_driver != windows_driver)
@@ -6529,7 +6614,7 @@ tuntap_set_ip_props(const struct tuntap *tt, bool *dhcp_masq, bool *dhcp_masq_po
}
else
{
- netsh_enable_dhcp(tt->actual_name);
+ netsh_enable_dhcp(tt->adapter_index);
}
}
*dhcp_masq = true;
@@ -6543,7 +6628,7 @@ tuntap_set_ip_props(const struct tuntap *tt, bool *dhcp_masq, bool *dhcp_masq_po
if (dhcp_status(tt->adapter_index) != DHCP_STATUS_ENABLED)
{
netsh_ifconfig(&tt->options,
- tt->actual_name,
+ tt->adapter_index,
tt->local,
tt->adapter_netmask,
NI_TEST_FIRST | NI_IP_NETMASK | NI_OPTIONS);
@@ -6675,15 +6760,25 @@ netsh_delete_address_dns(const struct tuntap *tt, bool ipv6, struct gc_arena *gc
if (len > 0)
{
argv_printf(&argv,
- "%s%s interface %s delete dns %s all",
+ "%s%s interface %s delete dns %lu all",
get_win_sys_path(),
NETSH_PATH_SUFFIX,
ipv6 ? "ipv6" : "ipv4",
- tt->actual_name);
+ tt->adapter_index);
netsh_command(&argv, 1, M_WARN);
}
- if (ipv6)
+ if (!ipv6 && tt->options.wins_len > 0)
+ {
+ argv_printf(&argv,
+ "%s%s interface ipv4 delete winsservers %lu all",
+ get_win_sys_path(),
+ NETSH_PATH_SUFFIX,
+ tt->adapter_index);
+ netsh_command(&argv, 1, M_WARN);
+ }
+
+ if (ipv6 && tt->type == DEV_TYPE_TUN)
{
delete_route_connected_v6_net(tt);
}
@@ -6692,7 +6787,7 @@ netsh_delete_address_dns(const struct tuntap *tt, bool ipv6, struct gc_arena *gc
* address we added (pointed out by Cedric Tabary).
*/
- /* netsh interface ipvX delete address \"%s\" %s */
+ /* netsh interface ipvX delete address %lu %s */
if (ipv6)
{
ifconfig_ip_local = print_in6_addr(tt->local_ipv6, 0, gc);
@@ -6702,11 +6797,11 @@ netsh_delete_address_dns(const struct tuntap *tt, bool ipv6, struct gc_arena *gc
ifconfig_ip_local = print_in_addr_t(tt->local, 0, gc);
}
argv_printf(&argv,
- "%s%s interface %s delete address %s %s store=active",
+ "%s%s interface %s delete address %lu %s store=active",
get_win_sys_path(),
NETSH_PATH_SUFFIX,
ipv6 ? "ipv6" : "ipv4",
- tt->actual_name,
+ tt->adapter_index,
ifconfig_ip_local);
netsh_command(&argv, 1, M_WARN);
@@ -6728,6 +6823,11 @@ close_tun(struct tuntap *tt, openvpn_net_ctx_t *ctx)
}
else if (tt->options.msg_channel)
{
+ /* If IPv4 is not enabled, delete DNS domain here */
+ if (!tt->did_ifconfig_setup)
+ {
+ do_dns_domain_service(false, tt);
+ }
if (tt->options.dns6_len > 0)
{
do_dns_service(false, AF_INET6, tt);
@@ -6753,6 +6853,7 @@ close_tun(struct tuntap *tt, openvpn_net_ctx_t *ctx)
}
else if (tt->options.msg_channel)
{
+ do_dns_domain_service(false, tt);
do_dns_service(false, AF_INET, tt);
do_address_service(false, AF_INET, tt);
}
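
Two themes run through the tun.c changes above: every netsh invocation now addresses the adapter by its numeric index rather than by name, and in TUN mode the IPv6 address is installed as /128 (the subnet is simulated with an on-link route) while TAP mode keeps the real prefix length. A sketch of the resulting command line, using the adapter index 42 from the comments above:

#include <stdio.h>

/* Format the netsh command used above: address the adapter by index,
 * /128 for TUN (simulated subnet), real netbits for TAP. */
static void
format_netsh_ipv6_addr(char *buf, size_t n, unsigned long adapter_index,
                       const char *addr, int is_tun, int netbits_ipv6)
{
    int netbits = is_tun ? 128 : netbits_ipv6;
    snprintf(buf, n,
             "netsh interface ipv6 set address %lu %s/%d store=active",
             adapter_index, addr, netbits);
}

/* e.g. format_netsh_ipv6_addr(buf, sizeof(buf), 42, "2001:608:8003::d", 1, 64)
 * yields "netsh interface ipv6 set address 42 2001:608:8003::d/128 store=active"
 */
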
diff --git a/src/openvpnmsica/dllmain.c b/src/openvpnmsica/dllmain.c
index 201fd9a..34946ed 100644
--- a/src/openvpnmsica/dllmain.c
+++ b/src/openvpnmsica/dllmain.c
@@ -193,6 +193,6 @@ x_msg_va(const unsigned int flags, const char *format, va_list arglist)
}
}
- MsiProcessMessage(s->hInstall, INSTALLMESSAGE_ERROR, hRecordProg);
+ MsiProcessMessage(s->hInstall, (flags & M_WARN) ? INSTALLMESSAGE_INFO : INSTALLMESSAGE_ERROR, hRecordProg);
MsiCloseHandle(hRecordProg);
}
diff --git a/src/openvpnmsica/openvpnmsica.c b/src/openvpnmsica/openvpnmsica.c
index 31e90bd..de1cf65 100644
--- a/src/openvpnmsica/openvpnmsica.c
+++ b/src/openvpnmsica/openvpnmsica.c
@@ -248,7 +248,7 @@ cleanup_OpenSCManager:
}
-static UINT
+static void
find_adapters(
_In_ MSIHANDLE hInstall,
_In_z_ LPCTSTR szzHardwareIDs,
@@ -262,12 +262,12 @@ find_adapters(
uiResult = tap_list_adapters(NULL, szzHardwareIDs, &pAdapterList);
if (uiResult != ERROR_SUCCESS)
{
- return uiResult;
+ return;
}
else if (pAdapterList == NULL)
{
/* No adapters - no fun. */
- return ERROR_SUCCESS;
+ return;
}
/* Get IPv4/v6 info for all network adapters. Actually, we're interested in link status only: up/down? */
@@ -394,7 +394,6 @@ cleanup_pAdapterAdresses:
free(pAdapterAdresses);
cleanup_pAdapterList:
tap_free_adapter_list(pAdapterList);
- return uiResult;
}
@@ -1096,12 +1095,9 @@ ProcessDeferredAction(_In_ MSIHANDLE hInstall)
dwResult = tap_create_adapter(NULL, NULL, szHardwareId, &bRebootRequired, &guidAdapter);
if (dwResult == ERROR_SUCCESS)
{
- /* Set adapter name. */
- dwResult = tap_set_adapter_name(&guidAdapter, szName);
- if (dwResult != ERROR_SUCCESS)
- {
- tap_delete_adapter(NULL, &guidAdapter, &bRebootRequired);
- }
+ /* Set adapter name. May fail on some machines, but that is not critical - use silent
+ flag to mute messagebox and print error only to log */
+ tap_set_adapter_name(&guidAdapter, szName, TRUE);
}
}
else if (wcsncmp(szArg[i], L"deleteN=", 8) == 0)
diff --git a/src/openvpnserv/interactive.c b/src/openvpnserv/interactive.c
index 207cc4a..65bb106 100644
--- a/src/openvpnserv/interactive.c
+++ b/src/openvpnserv/interactive.c
@@ -91,6 +91,7 @@ typedef enum {
block_dns,
undo_dns4,
undo_dns6,
+ undo_domain,
_undo_type_max
} undo_type_t;
typedef list_item_t *undo_lists_t[_undo_type_max];
@@ -564,6 +565,24 @@ InterfaceLuid(const char *iface_name, PNET_LUID luid)
return status;
}
+static DWORD
+ConvertInterfaceNameToIndex(const wchar_t *ifname, NET_IFINDEX *index)
+{
+ NET_LUID luid;
+ DWORD err;
+
+ err = ConvertInterfaceAliasToLuid(ifname, &luid);
+ if (err == ERROR_SUCCESS)
+ {
+ err = ConvertInterfaceLuidToIndex(&luid, index);
+ }
+ if (err != ERROR_SUCCESS)
+ {
+ MsgToEventLog(M_ERR, L"Failed to find interface index for <%s>", ifname);
+ }
+ return err;
+}
+
static BOOL
CmpAddress(LPVOID item, LPVOID address)
{
@@ -1057,6 +1076,53 @@ out:
return err;
}
+/**
+ * Run command: wmic nicconfig (InterfaceIndex=$if_index) call $action ($data)
+ * @param if_index "index of interface"
+ * @param action e.g., "SetDNSDomain"
+ * @param data data if required for action
+ * - a single word for SetDNSDomain, empty or NULL to delete
+ * - comma separated values for a list
+ */
+static DWORD
+wmic_nicconfig_cmd(const wchar_t *action, const NET_IFINDEX if_index,
+ const wchar_t *data)
+{
+ DWORD err = 0;
+ wchar_t argv0[MAX_PATH];
+ wchar_t *cmdline = NULL;
+ int timeout = 10000; /* in msec */
+
+ swprintf(argv0, _countof(argv0), L"%s\\%s", get_win_sys_path(), L"wbem\\wmic.exe");
+ argv0[_countof(argv0) - 1] = L'\0';
+
+ const wchar_t *fmt;
+ /* comma separated list must be enclosed in parentheses */
+ if (data && wcschr(data, L','))
+ {
+ fmt = L"wmic nicconfig where (InterfaceIndex=%ld) call %s (%s)";
+ }
+ else
+ {
+ fmt = L"wmic nicconfig where (InterfaceIndex=%ld) call %s %s";
+ }
+
+ size_t ncmdline = wcslen(fmt) + 20 + wcslen(action) /* max 20 for ifindex */
+ + (data ? wcslen(data) + 1 : 1);
+ cmdline = malloc(ncmdline*sizeof(wchar_t));
+ if (!cmdline)
+ {
+ return ERROR_OUTOFMEMORY;
+ }
+
+ openvpn_sntprintf(cmdline, ncmdline, fmt, if_index, action,
+ data? data : L"");
+ err = ExecCommand(argv0, cmdline, timeout);
+
+ free(cmdline);
+ return err;
+}
+
/* Delete all IPv4 or IPv6 dns servers for an interface */
static DWORD
DeleteDNS(short family, wchar_t *if_name)
@@ -1079,6 +1145,54 @@ CmpWString(LPVOID item, LPVOID str)
return (wcscmp(item, str) == 0) ? TRUE : FALSE;
}
+/**
+ * Set interface specific DNS domain suffix
+ * @param if_name name of the the interface
+ * @param domain a single domain name
+ * @param lists pointer to the undo lists. If NULL
+ * undo lists are not altered.
+ * Will delete the currently set value if domain is empty.
+ */
+static DWORD
+SetDNSDomain(const wchar_t *if_name, const char *domain, undo_lists_t *lists)
+{
+ NET_IFINDEX if_index;
+
+ DWORD err = ConvertInterfaceNameToIndex(if_name, &if_index);
+ if (err != ERROR_SUCCESS)
+ {
+ return err;
+ }
+
+ wchar_t *wdomain = utf8to16(domain); /* utf8 to wide-char */
+ if (!wdomain)
+ {
+ return ERROR_OUTOFMEMORY;
+ }
+
+ /* free undo list if previously set */
+ if (lists)
+ {
+ free(RemoveListItem(&(*lists)[undo_domain], CmpWString, (void *)if_name));
+ }
+
+ err = wmic_nicconfig_cmd(L"SetDNSDomain", if_index, wdomain);
+
+ /* Add to undo list if domain is non-empty */
+ if (err == 0 && wdomain[0] && lists)
+ {
+ wchar_t *tmp_name = wcsdup(if_name);
+ if (!tmp_name || AddListItem(&(*lists)[undo_domain], tmp_name))
+ {
+ free(tmp_name);
+ err = ERROR_OUTOFMEMORY;
+ }
+ }
+
+ free(wdomain);
+ return err;
+}
+
static DWORD
HandleDNSConfigMessage(const dns_cfg_message_t *msg, undo_lists_t *lists)
{
@@ -1098,6 +1212,13 @@ HandleDNSConfigMessage(const dns_cfg_message_t *msg, undo_lists_t *lists)
return ERROR_MESSAGE_DATA;
}
+ /* use a non-const reference with limited scope to enforce null-termination of strings from client */
+ {
+ dns_cfg_message_t *msgptr = (dns_cfg_message_t *) msg;
+ msgptr->iface.name[_countof(msg->iface.name)-1] = '\0';
+ msgptr->domains[_countof(msg->domains)-1] = '\0';
+ }
+
wchar_t *wide_name = utf8to16(msg->iface.name); /* utf8 to wide-char */
if (!wide_name)
{
@@ -1117,9 +1238,14 @@ HandleDNSConfigMessage(const dns_cfg_message_t *msg, undo_lists_t *lists)
free(RemoveListItem(&(*lists)[undo_type], CmpWString, wide_name));
}
- if (msg->header.type == msg_del_dns_cfg) /* job done */
+ if (msg->header.type == msg_del_dns_cfg)
{
- goto out;
+ if (msg->domains[0])
+ {
+ /* setting an empty domain removes any previous value */
+ err = SetDNSDomain(wide_name, "", lists);
+ }
+ goto out; /* job done */
}
for (int i = 0; i < addr_len; ++i)
@@ -1142,6 +1268,8 @@ HandleDNSConfigMessage(const dns_cfg_message_t *msg, undo_lists_t *lists)
*/
}
+ err = 0;
+
if (msg->addr_len > 0)
{
wchar_t *tmp_name = wcsdup(wide_name);
@@ -1154,7 +1282,10 @@ HandleDNSConfigMessage(const dns_cfg_message_t *msg, undo_lists_t *lists)
}
}
- err = 0;
+ if (msg->domains[0])
+ {
+ err = SetDNSDomain(wide_name, msg->domains, lists);
+ }
out:
free(wide_name);
@@ -1445,6 +1576,10 @@ Undo(undo_lists_t *lists)
DeleteDNS(AF_INET6, item->data);
break;
+ case undo_domain:
+ SetDNSDomain(item->data, "", NULL);
+ break;
+
case block_dns:
interface_data = (block_dns_data_t *)(item->data);
delete_block_dns_filters(interface_data->engine);
diff --git a/src/tapctl/main.c b/src/tapctl/main.c
index 31bb2ec..d5bc729 100644
--- a/src/tapctl/main.c
+++ b/src/tapctl/main.c
@@ -237,7 +237,7 @@ _tmain(int argc, LPCTSTR argv[])
}
/* Rename the adapter. */
- dwResult = tap_set_adapter_name(&guidAdapter, szName);
+ dwResult = tap_set_adapter_name(&guidAdapter, szName, FALSE);
if (dwResult != ERROR_SUCCESS)
{
StringFromIID((REFIID)&guidAdapter, &szAdapterId);
diff --git a/src/tapctl/tap.c b/src/tapctl/tap.c
index 7cb3ded..dd4a10a 100644
--- a/src/tapctl/tap.c
+++ b/src/tapctl/tap.c
@@ -33,18 +33,69 @@
#include <setupapi.h>
#include <stdio.h>
#include <tchar.h>
+#include <newdev.h>
#ifdef _MSC_VER
#pragma comment(lib, "advapi32.lib")
#pragma comment(lib, "ole32.lib")
#pragma comment(lib, "setupapi.lib")
+#pragma comment(lib, "newdev.lib")
#endif
+
const static GUID GUID_DEVCLASS_NET = { 0x4d36e972L, 0xe325, 0x11ce, { 0xbf, 0xc1, 0x08, 0x00, 0x2b, 0xe1, 0x03, 0x18 } };
const static TCHAR szAdapterRegKeyPathTemplate[] = TEXT("SYSTEM\\CurrentControlSet\\Control\\Network\\%") TEXT(PRIsLPOLESTR) TEXT("\\%") TEXT(PRIsLPOLESTR) TEXT("\\Connection");
#define ADAPTER_REGKEY_PATH_MAX (_countof(TEXT("SYSTEM\\CurrentControlSet\\Control\\Network\\")) - 1 + 38 + _countof(TEXT("\\")) - 1 + 38 + _countof(TEXT("\\Connection")))
+/**
+ * Dynamically load a library and find a function in it
+ *
+ * @param libname Name of the library to load
+ * @param funcname Name of the function to find
+ * @param m Pointer to a module. On return this is set to the
+ * the handle to the loaded library. The caller must
+ * free it by calling FreeLibrary() if not NULL.
+ *
+ * @return Pointer to the function
+ * NULL on error -- use GetLastError() to find the error code.
+ *
+ **/
+static void *
+find_function(const WCHAR *libname, const char *funcname, HMODULE *m)
+{
+ WCHAR libpath[MAX_PATH];
+ void *fptr = NULL;
+
+ /* Make sure the dll is loaded from the system32 folder */
+ if (!GetSystemDirectoryW(libpath, _countof(libpath)))
+ {
+ return NULL;
+ }
+
+ size_t len = _countof(libpath) - wcslen(libpath) - 1;
+ if (len < wcslen(libname) + 1)
+ {
+ SetLastError(ERROR_INSUFFICIENT_BUFFER);
+ return NULL;
+ }
+ wcsncat(libpath, L"\\", len);
+ wcsncat(libpath, libname, len-1);
+
+ *m = LoadLibraryW(libpath);
+ if (*m == NULL)
+ {
+ return NULL;
+ }
+ fptr = GetProcAddress(*m, funcname);
+ if (!fptr)
+ {
+ FreeLibrary(*m);
+ *m = NULL;
+ return NULL;
+ }
+ return fptr;
+}
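As a usage note, the only caller added in this patch resolves DiInstallDevice() through this helper on MinGW builds; a condensed sketch of that pattern:

    /* Sketch only -- mirrors the fallback path in tap_create_adapter() below. */
    typedef BOOL (WINAPI *DiInstallDeviceFn)(HWND, HDEVINFO, SP_DEVINFO_DATA *,
                                             SP_DRVINFO_DATA *, DWORD, BOOL *);
    HMODULE libnewdev = NULL;
    DiInstallDeviceFn installfn = find_function(L"newdev.dll", "DiInstallDevice", &libnewdev);
    if (installfn)
    {
        /* ... call installfn(...), then FreeLibrary(libnewdev) when done ... */
    }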
/**
* Returns length of string of strings
@@ -678,6 +729,7 @@ tap_create_adapter(
_Out_ LPGUID pguidAdapter)
{
DWORD dwResult;
+ HMODULE libnewdev = NULL;
if (szHwId == NULL
|| pbRebootRequired == NULL
@@ -746,129 +798,7 @@ tap_create_adapter(
goto cleanup_hDevInfoList;
}
- /* Search for the driver. */
- if (!SetupDiBuildDriverInfoList(
- hDevInfoList,
- &devinfo_data,
- SPDIT_CLASSDRIVER))
- {
- dwResult = GetLastError();
- msg(M_NONFATAL, "%s: SetupDiBuildDriverInfoList failed", __FUNCTION__);
- goto cleanup_hDevInfoList;
- }
- DWORDLONG dwlDriverVersion = 0;
- DWORD drvinfo_detail_data_size = sizeof(SP_DRVINFO_DETAIL_DATA) + 0x100;
- SP_DRVINFO_DETAIL_DATA *drvinfo_detail_data = (SP_DRVINFO_DETAIL_DATA *)malloc(drvinfo_detail_data_size);
- if (drvinfo_detail_data == NULL)
- {
- msg(M_FATAL, "%s: malloc(%u) failed", __FUNCTION__, drvinfo_detail_data_size);
- dwResult = ERROR_OUTOFMEMORY; goto cleanup_DriverInfoList;
- }
-
- for (DWORD dwIndex = 0;; dwIndex++)
- {
- /* Get a driver from the list. */
- SP_DRVINFO_DATA drvinfo_data = { .cbSize = sizeof(SP_DRVINFO_DATA) };
- if (!SetupDiEnumDriverInfo(
- hDevInfoList,
- &devinfo_data,
- SPDIT_CLASSDRIVER,
- dwIndex,
- &drvinfo_data))
- {
- if (GetLastError() == ERROR_NO_MORE_ITEMS)
- {
- break;
- }
- else
- {
- /* Something is wrong with this driver. Skip it. */
- msg(M_WARN | M_ERRNO, "%s: SetupDiEnumDriverInfo(%u) failed", __FUNCTION__, dwIndex);
- continue;
- }
- }
-
- /* Get driver info details. */
- DWORD dwSize;
- drvinfo_detail_data->cbSize = sizeof(SP_DRVINFO_DETAIL_DATA);
- if (!SetupDiGetDriverInfoDetail(
- hDevInfoList,
- &devinfo_data,
- &drvinfo_data,
- drvinfo_detail_data,
- drvinfo_detail_data_size,
- &dwSize))
- {
- if (GetLastError() == ERROR_INSUFFICIENT_BUFFER)
- {
- /* (Re)allocate buffer. */
- if (drvinfo_detail_data)
- {
- free(drvinfo_detail_data);
- }
-
- drvinfo_detail_data_size = dwSize;
- drvinfo_detail_data = (SP_DRVINFO_DETAIL_DATA *)malloc(drvinfo_detail_data_size);
- if (drvinfo_detail_data == NULL)
- {
- msg(M_FATAL, "%s: malloc(%u) failed", __FUNCTION__, drvinfo_detail_data_size);
- dwResult = ERROR_OUTOFMEMORY; goto cleanup_DriverInfoList;
- }
-
- /* Re-get driver info details. */
- drvinfo_detail_data->cbSize = sizeof(SP_DRVINFO_DETAIL_DATA);
- if (!SetupDiGetDriverInfoDetail(
- hDevInfoList,
- &devinfo_data,
- &drvinfo_data,
- drvinfo_detail_data,
- drvinfo_detail_data_size,
- &dwSize))
- {
- /* Something is wrong with this driver. Skip it. */
- continue;
- }
- }
- else
- {
- /* Something is wrong with this driver. Skip it. */
- msg(M_WARN | M_ERRNO, "%s: SetupDiGetDriverInfoDetail(\"%hs\") failed", __FUNCTION__, drvinfo_data.Description);
- continue;
- }
- }
-
- /* Check the driver version and hardware ID. */
- if (dwlDriverVersion < drvinfo_data.DriverVersion
- && drvinfo_detail_data->HardwareID
- && _tcszistr(drvinfo_detail_data->HardwareID, szHwId))
- {
- /* Newer version and matching hardware ID found. Select the driver. */
- if (!SetupDiSetSelectedDriver(
- hDevInfoList,
- &devinfo_data,
- &drvinfo_data))
- {
- /* Something is wrong with this driver. Skip it. */
- msg(M_WARN | M_ERRNO, "%s: SetupDiSetSelectedDriver(\"%hs\") failed", __FUNCTION__, drvinfo_data.Description);
- continue;
- }
-
- dwlDriverVersion = drvinfo_data.DriverVersion;
- }
- }
- if (drvinfo_detail_data)
- {
- free(drvinfo_detail_data);
- }
-
- if (dwlDriverVersion == 0)
- {
- dwResult = ERROR_NOT_FOUND;
- msg(M_NONFATAL, "%s: No driver for device \"%" PRIsLPTSTR "\" installed.", __FUNCTION__, szHwId);
- goto cleanup_DriverInfoList;
- }
-
- /* Call appropriate class installer. */
+ /* Register the device instance with the PnP Manager */
if (!SetupDiCallClassInstaller(
DIF_REGISTERDEVICE,
hDevInfoList,
@@ -876,43 +806,38 @@ tap_create_adapter(
{
dwResult = GetLastError();
msg(M_NONFATAL, "%s: SetupDiCallClassInstaller(DIF_REGISTERDEVICE) failed", __FUNCTION__);
- goto cleanup_DriverInfoList;
+ goto cleanup_hDevInfoList;
}
- /* Register device co-installers if any. */
- if (!SetupDiCallClassInstaller(
- DIF_REGISTER_COINSTALLERS,
- hDevInfoList,
- &devinfo_data))
- {
- dwResult = GetLastError();
- msg(M_WARN | M_ERRNO, "%s: SetupDiCallClassInstaller(DIF_REGISTER_COINSTALLERS) failed", __FUNCTION__);
- }
+ /* Install the device using DiInstallDevice()
+ * We instruct the system to use the best driver in the driver store
+ * by setting the drvinfo argument of DiInstallDevice to NULL. This
+ * assumes a driver is already installed in the driver store.
+ */
+#ifdef HAVE_DIINSTALLDEVICE
+ if (!DiInstallDevice(hwndParent, hDevInfoList, &devinfo_data, NULL, 0, pbRebootRequired))
+#else
+ /* mingw does not resolve DiInstallDevice, so load it at run time. */
+ typedef BOOL (WINAPI *DiInstallDeviceFn) (HWND, HDEVINFO, SP_DEVINFO_DATA *,
+ SP_DRVINFO_DATA *, DWORD, BOOL *);
+ DiInstallDeviceFn installfn
+ = find_function (L"newdev.dll", "DiInstallDevice", &libnewdev);
- /* Install adapters if any. */
- if (!SetupDiCallClassInstaller(
- DIF_INSTALLINTERFACES,
- hDevInfoList,
- &devinfo_data))
+ if (!installfn)
{
dwResult = GetLastError();
- msg(M_WARN | M_ERRNO, "%s: SetupDiCallClassInstaller(DIF_INSTALLINTERFACES) failed", __FUNCTION__);
+ msg(M_NONFATAL | M_ERRNO, "%s: Failed to locate DiInstallDevice()", __FUNCTION__);
+ goto cleanup_hDevInfoList;
}
- /* Install the device. */
- if (!SetupDiCallClassInstaller(
- DIF_INSTALLDEVICE,
- hDevInfoList,
- &devinfo_data))
+ if (!installfn(hwndParent, hDevInfoList, &devinfo_data, NULL, 0, pbRebootRequired))
+#endif
{
dwResult = GetLastError();
- msg(M_NONFATAL | M_ERRNO, "%s: SetupDiCallClassInstaller(DIF_INSTALLDEVICE) failed", __FUNCTION__);
+ msg(M_NONFATAL | M_ERRNO, "%s: DiInstallDevice failed", __FUNCTION__);
goto cleanup_remove_device;
}
- /* Check if a system reboot is required. (Ignore errors) */
- check_reboot(hDevInfoList, &devinfo_data, pbRebootRequired);
-
/* Get network adapter ID from registry. Retry for max 30sec. */
dwResult = get_net_adapter_guid(hDevInfoList, &devinfo_data, 30, pguidAdapter);
@@ -958,13 +883,11 @@ cleanup_remove_device:
}
}
-cleanup_DriverInfoList:
- SetupDiDestroyDriverInfoList(
- hDevInfoList,
- &devinfo_data,
- SPDIT_CLASSDRIVER);
-
cleanup_hDevInfoList:
+ if (libnewdev)
+ {
+ FreeLibrary(libnewdev);
+ }
SetupDiDestroyDeviceInfoList(hDevInfoList);
return dwResult;
}
@@ -1140,9 +1063,12 @@ ExecCommand(const WCHAR* cmdline)
DWORD
tap_set_adapter_name(
_In_ LPCGUID pguidAdapter,
- _In_ LPCTSTR szName)
+ _In_ LPCTSTR szName,
+ _In_ BOOL bSilent)
{
DWORD dwResult;
+ int msg_flag = bSilent ? M_WARN : M_NONFATAL;
+ msg_flag |= M_ERRNO;
if (pguidAdapter == NULL || szName == NULL)
{
@@ -1176,7 +1102,7 @@ tap_set_adapter_name(
if (dwResult != ERROR_SUCCESS)
{
SetLastError(dwResult); /* MSDN does not mention RegOpenKeyEx() to set GetLastError(). But we do have an error code. Set last error manually. */
- msg(M_NONFATAL | M_ERRNO, "%s: RegOpenKeyEx(HKLM, \"%" PRIsLPTSTR "\") failed", __FUNCTION__, szRegKey);
+ msg(msg_flag, "%s: RegOpenKeyEx(HKLM, \"%" PRIsLPTSTR "\") failed", __FUNCTION__, szRegKey);
goto cleanup_szAdapterId;
}
@@ -1185,7 +1111,7 @@ tap_set_adapter_name(
if (dwResult != ERROR_SUCCESS)
{
SetLastError(dwResult);
- msg(M_NONFATAL | M_ERRNO, "%s: Error reading adapter name", __FUNCTION__);
+ msg(msg_flag, "%s: Error reading adapter name", __FUNCTION__);
goto cleanup_hKey;
}
@@ -1203,7 +1129,7 @@ tap_set_adapter_name(
if (dwResult != ERROR_SUCCESS)
{
SetLastError(dwResult);
- msg(M_NONFATAL | M_ERRNO, "%s: Error renaming adapter", __FUNCTION__);
+ msg(msg_flag, "%s: Error renaming adapter", __FUNCTION__);
goto cleanup_hKey;
}
diff --git a/src/tapctl/tap.h b/src/tapctl/tap.h
index 102de32..63d791c 100644
--- a/src/tapctl/tap.h
+++ b/src/tapctl/tap.h
@@ -118,12 +118,16 @@ tap_enable_adapter(
*
* @param szName New adapter name - must be unique
*
+ * @param bSilent If TRUE, the MSI installer will not display a message box
+ * and will only log the error.
+ *
* @return ERROR_SUCCESS on success; Win32 error code otherwise
**/
DWORD
tap_set_adapter_name(
_In_ LPCGUID pguidAdapter,
- _In_ LPCTSTR szName);
+ _In_ LPCTSTR szName,
+ _In_ BOOL bSilent);
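A hedged usage sketch; the GUID and adapter name are placeholders. Passing FALSE matches the interactive tapctl call in main.c above, while a silent caller would pass TRUE so a failure is only logged as a warning:

    /* Sketch only. */
    GUID guidAdapter = { 0 };  /* placeholder; normally obtained from tap_create_adapter() */
    DWORD dwResult = tap_set_adapter_name(&guidAdapter, TEXT("MyTAP"), FALSE);
    if (dwResult != ERROR_SUCCESS)
    {
        /* handle error; with bSilent == TRUE the same failure is logged with M_WARN */
    }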
/**