Lines matching "+full:- +full:v" (free-text search over the LZ4 source file lz4.c)
2 LZ4 - Fast LZ compression algorithm
3 Copyright (C) 2011-2020, Yann Collet.
5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
31 - LZ4 homepage : http://www.lz4.org
32 - LZ4 source repository : https://github.com/lz4/lz4
35 /*-************************************
60 /*-************************************
65 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
73 * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
97 /*-************************************
121 /*-************************************
147 * together with a simple 8-byte copy loop as a fall-back path.
151 * before going to the fall-back path become useless overhead.
152 * This optimization happens only with the -O3 flag, and -O2 generates
153 * a simple 8-byte copy loop.
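The wildcopy these lines describe is worth seeing in full. A minimal sketch, modeled on lz4.c's LZ4_wildCopy8() (the helper name wildCopy8 below is illustrative): copy in fixed 8-byte chunks and deliberately overshoot, which is exactly why callers must reserve slack bytes past dstEnd.

    #include <string.h>

    /* Minimal wildcopy sketch: copies in 8-byte chunks and may write up to
     * 7 bytes past dstEnd, so the output buffer needs that much slack. */
    static void wildCopy8(unsigned char* dst, const unsigned char* src, unsigned char* dstEnd)
    {
        do { memcpy(dst, src, 8); dst += 8; src += 8; } while (dst < dstEnd);
    }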
187 /*-************************************
208 #define MEM_INIT(p,v,s) memset((p),(v),(s))
211 /*-************************************
217 #define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
218 #define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
219 #define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to writ…
233 #define ML_MASK ((1U<<ML_BITS)-1)
234 #define RUN_BITS (8-ML_BITS)
235 #define RUN_MASK ((1U<<RUN_BITS)-1)
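These macros define the sequence token layout: the high 4 bits hold the literal run length, the low 4 bits hold matchLength minus MINMATCH, and the value 15 in either field signals that extra length bytes follow. A small illustrative packer (make_token is a hypothetical helper, not part of lz4.c; the macros are repeated for self-containment):

    #define ML_BITS  4
    #define ML_MASK  ((1U<<ML_BITS)-1)   /* 15 */
    #define RUN_BITS (8-ML_BITS)
    #define RUN_MASK ((1U<<RUN_BITS)-1)  /* 15 */

    /* Pack a token from the two length fields; 15 means "extra bytes follow". */
    static unsigned char make_token(unsigned litLength, unsigned matchLengthMinus4)
    {
        unsigned const lit = (litLength < RUN_MASK) ? litLength : RUN_MASK;
        unsigned const ml  = (matchLengthMinus4 < ML_MASK) ? matchLengthMinus4 : ML_MASK;
        return (unsigned char)((lit << ML_BITS) | ml);
    }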
238 /*-************************************
266 return ((size_t)ptr & (alignment -1)) == 0; in LZ4_isAligned()
270 /*-************************************
291 typedef size_t uptrval; /* generally true, except OpenVMS-64 */
295 typedef U64 reg_t; /* 64-bits in x32 mode */
297 typedef size_t reg_t; /* 32-bits in x32 mode */
307 /*-************************************
348 static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } in LZ4_read16()
349 static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } in LZ4_read32()
350 static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; } in LZ4_read_ARCH()
352 static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } in LZ4_write16()
353 static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } in LZ4_write32()
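These accessors depend on a packed union; in lz4.c (the LZ4_FORCE_MEMORY_ACCESS==1 path) it is declared as:

    /* Packed union: lets GCC/clang generate safe unaligned loads and stores. */
    typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;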
418 static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
448 srcPtr -= dec64table[offset]; in LZ4_memcpy_using_offset_base()
473 * - dstEnd >= dstPtr + MINMATCH
474 * - there is at least 8 bytes available to write after dstEnd */
478 BYTE v[8]; in LZ4_memcpy_using_offset()
484 MEM_INIT(v, *srcPtr, 8); in LZ4_memcpy_using_offset()
487 LZ4_memcpy(v, srcPtr, 2); in LZ4_memcpy_using_offset()
488 LZ4_memcpy(&v[2], srcPtr, 2); in LZ4_memcpy_using_offset()
489 LZ4_memcpy(&v[4], v, 4); in LZ4_memcpy_using_offset()
492 LZ4_memcpy(v, srcPtr, 4); in LZ4_memcpy_using_offset()
493 LZ4_memcpy(&v[4], srcPtr, 4); in LZ4_memcpy_using_offset()
500 LZ4_memcpy(dstPtr, v, 8); in LZ4_memcpy_using_offset()
503 LZ4_memcpy(dstPtr, v, 8); in LZ4_memcpy_using_offset()
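The v[8] staging buffer exists because, for offsets smaller than 8, the match overlaps the bytes being written, so a plain memcpy is unsafe. The switch above pre-builds one 8-byte pattern whose period divides 8 and stamps it repeatedly. A hypothetical standalone illustration for offset == 1 (pure RLE; expand_offset1 is not an lz4.c function):

    #include <string.h>

    /* offset == 1: every output byte repeats the byte just before dst.
     * Like the real function, this may overrun len by up to 7 bytes. */
    static void expand_offset1(unsigned char* dst, size_t len)
    {
        unsigned char v[8];
        size_t i;
        memset(v, dst[-1], 8);           /* 8 copies of the repeated byte */
        for (i = 0; i < len; i += 8)
            memcpy(dst + i, v, 8);
    }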
510 /*-************************************
531 val ^= val - 1; in LZ4_NbCommonBytes()
532 return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56); in LZ4_NbCommonBytes()
545 return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24; in LZ4_NbCommonBytes()
569 U64 const t = (((val >> 8) - mask) | val) & mask; in LZ4_NbCommonBytes()
576 Just to avoid some static analyzers complaining about shift by 32 on 32-bit targets. in LZ4_NbCommonBytes()
577 Note that this code path is never triggered in 32-bit mode. */ in LZ4_NbCommonBytes()
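All of these branches compute the same quantity: how many leading bytes two machine words have in common, found by counting zero bytes in their XOR. On little-endian targets with compiler builtins the whole function reduces to the sketch below (nbCommonBytes_le is illustrative; assumes GCC/clang's __builtin_ctzll):

    /* Number of equal leading bytes of two 64-bit words, little-endian view. */
    static unsigned nbCommonBytes_le(unsigned long long a, unsigned long long b)
    {
        unsigned long long const x = a ^ b;
        return x ? (unsigned)(__builtin_ctzll(x) >> 3) : 8;  /* trailing zero bits / 8 */
    }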
607 if (likely(pIn < pInLimit-(STEPSIZE-1))) { in LZ4_count()
615 while (likely(pIn < pInLimit-(STEPSIZE-1))) { in LZ4_count()
619 return (unsigned)(pIn - pStart); in LZ4_count()
622 …if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMat… in LZ4_count()
623 if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; } in LZ4_count()
625 return (unsigned)(pIn - pStart); in LZ4_count()
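LZ4_count() measures a match length: it compares STEPSIZE bytes at a time and uses LZ4_NbCommonBytes() on the first differing word, with the 4/2/1-byte tail handling shown above. A byte-at-a-time reference version with the same contract (count_ref is an illustrative name):

    /* Reference semantics of LZ4_count(): length of the run of equal bytes
     * between pIn and pMatch, never reading at or past pInLimit. */
    static unsigned count_ref(const unsigned char* pIn, const unsigned char* pMatch,
                              const unsigned char* pInLimit)
    {
        const unsigned char* const pStart = pIn;
        while ((pIn < pInLimit) && (*pIn == *pMatch)) { pIn++; pMatch++; }
        return (unsigned)(pIn - pStart);
    }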
630 /*-************************************
633 static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
637 /*-************************************
646 * - noDict : There is no preceding content.
647 * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
649 * content (of length ctx->dictSize), which is available
652 * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere
653 * else in memory, starting at ctx->dictionary with length
654 * ctx->dictSize.
655 * - usingDictCtx : Like usingExtDict, but everything concerning the preceding
657 * ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
660 * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
661 * ->dictSize describe the location and size of the preceding
663 * ->dictCtx->hashTable.
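The four modes described above are carried through the compressor as a compile-time parameter; in lz4.c the corresponding enum is:

    typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;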
669 /*-************************************
678 /*-************************************
695 /*-******************************
701 return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1))); in LZ4_hash4()
703 return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG)); in LZ4_hash4()
711 return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); in LZ4_hash5()
714 return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); in LZ4_hash5()
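Both hashes are multiplicative (Fibonacci-style): multiply by a large odd constant and keep the top hashLog bits. For the 4-byte variant the expression simplifies to the sketch below (hash4 is an illustrative stand-in for LZ4_hash4; 2654435761U is the constant lz4.c uses, and MINMATCH*8 == 32):

    /* Multiplicative hash of a 4-byte sequence: keep the top hashLog bits. */
    static unsigned hash4(unsigned sequence, unsigned hashLog)
    {
        return (sequence * 2654435761U) >> (32 - hashLog);
    }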
756 case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; } in LZ4_putPositionOnHash()
757 case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; } in LZ4_putPositionOnHash()
778 assert(h < (1U << (LZ4_MEMORY_USAGE-2))); in LZ4_getIndexOnHash()
783 assert(h < (1U << (LZ4_MEMORY_USAGE-1))); in LZ4_getIndexOnHash()
813 if ((tableType_t)cctx->tableType != clearedTable) { in LZ4_prepareTable()
815 if ((tableType_t)cctx->tableType != tableType in LZ4_prepareTable()
816 || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) in LZ4_prepareTable()
817 || ((tableType == byU32) && cctx->currentOffset > 1 GB) in LZ4_prepareTable()
822 MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE); in LZ4_prepareTable()
823 cctx->currentOffset = 0; in LZ4_prepareTable()
824 cctx->tableType = (U32)clearedTable; in LZ4_prepareTable()
826 DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)"); in LZ4_prepareTable()
834 if (cctx->currentOffset != 0 && tableType == byU32) { in LZ4_prepareTable()
836 cctx->currentOffset += 64 KB; in LZ4_prepareTable()
840 cctx->dictCtx = NULL; in LZ4_prepareTable()
841 cctx->dictionary = NULL; in LZ4_prepareTable()
842 cctx->dictSize = 0; in LZ4_prepareTable()
848 * - source != NULL
849 * - inputSize > 0
867 U32 const startIndex = cctx->currentOffset; in LZ4_compress_generic_validated()
868 const BYTE* base = (const BYTE*) source - startIndex; in LZ4_compress_generic_validated()
871 const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx; in LZ4_compress_generic_validated()
873 dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary; in LZ4_compress_generic_validated()
875 dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize; in LZ4_compress_generic_validated()
876 …const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; … in LZ4_compress_generic_validated()
879 U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */ in LZ4_compress_generic_validated()
883 const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1; in LZ4_compress_generic_validated()
884 const BYTE* const matchlimit = iend - LASTLITERALS; in LZ4_compress_generic_validated()
889 dictionary + dictSize - dictCtx->currentOffset : in LZ4_compress_generic_validated()
890 dictionary + dictSize - startIndex; in LZ4_compress_generic_validated()
907 lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0); in LZ4_compress_generic_validated()
913 cctx->dictCtx = NULL; in LZ4_compress_generic_validated()
914 cctx->dictSize = (U32)inputSize; in LZ4_compress_generic_validated()
916 cctx->dictSize += (U32)inputSize; in LZ4_compress_generic_validated()
918 cctx->currentOffset += (U32)inputSize; in LZ4_compress_generic_validated()
919 cctx->tableType = (U32)tableType; in LZ4_compress_generic_validated()
924 LZ4_putPosition(ip, cctx->hashTable, tableType, base); in LZ4_compress_generic_validated()
947 match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base); in LZ4_compress_generic_validated()
949 LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base); in LZ4_compress_generic_validated()
961 U32 const current = (U32)(forwardIp - base); in LZ4_compress_generic_validated()
962 U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); in LZ4_compress_generic_validated()
964 assert(forwardIp - base < (ptrdiff_t)(2 GB - 1)); in LZ4_compress_generic_validated()
976 matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); in LZ4_compress_generic_validated()
987 assert(startIndex - matchIndex >= MINMATCH); in LZ4_compress_generic_validated()
998 LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); in LZ4_compress_generic_validated()
1000 DEBUGLOG(7, "candidate at pos=%u (offset=%u)\n", matchIndex, current - matchIndex); in LZ4_compress_generic_validated()
1007 … assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */ in LZ4_compress_generic_validated()
1010 if (maybe_extMem) offset = current - matchIndex; in LZ4_compress_generic_validated()
1019 … while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } in LZ4_compress_generic_validated()
1022 { unsigned const litLength = (unsigned)(ip - anchor); in LZ4_compress_generic_validated()
1029 …s */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <… in LZ4_compress_generic_validated()
1030 op--; in LZ4_compress_generic_validated()
1034 int len = (int)(litLength - RUN_MASK); in LZ4_compress_generic_validated()
1036 for(; len >= 255 ; len-=255) *op++ = 255; in LZ4_compress_generic_validated()
1045 … (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source)); in LZ4_compress_generic_validated()
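The loop above implements LZ4's length escape: once the 4-bit field saturates at RUN_MASK (15), the remainder is emitted as a run of 255 bytes plus one final byte below 255. For example, a 300-byte literal run becomes nibble 15 followed by bytes 255 and 30 (15 + 255 + 30 = 300). A sketch of the emitter (emit_extra_length is a hypothetical helper):

    /* Emit the extra-length bytes for a field that saturated at 15. */
    static unsigned char* emit_extra_length(unsigned char* op, unsigned remainder)
    {
        while (remainder >= 255) { *op++ = 255; remainder -= 255; }
        *op++ = (unsigned char)remainder;   /* final byte, < 255 */
        return op;
    }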
1050 * - ip : at start of LZ operation in LZ4_compress_generic_validated()
1051 …* - match : at start of previous pattern occurrence; can be within current prefix, or within extDi… in LZ4_compress_generic_validated()
1052 * - offset : if maybe_ext_memSegment==1 (constant) in LZ4_compress_generic_validated()
1053 …* - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise in LZ4_compress_generic_validated()
1054 …* - token and *token : position to write 4-bits for match length; higher 4-bits for literal length… in LZ4_compress_generic_validated()
1058 …(op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <=… in LZ4_compress_generic_validated()
1066 …DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source)); in LZ4_compress_generic_validated()
1070 DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match)); in LZ4_compress_generic_validated()
1071 assert(ip-match <= LZ4_DISTANCE_MAX); in LZ4_compress_generic_validated()
1072 LZ4_writeLE16(op, (U16)(ip - match)); op+=2; in LZ4_compress_generic_validated()
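Offsets are always stored as 2 little-endian bytes, regardless of host endianness. A portable writer equivalent to LZ4_writeLE16() (a sketch; lz4.c uses a raw 16-bit store when the target is already little-endian):

    /* Store a 16-bit offset little-endian, independent of host byte order. */
    static void writeLE16(unsigned char* p, unsigned short v)
    {
        p[0] = (unsigned char)(v & 0xFF);
        p[1] = (unsigned char)(v >> 8);
    }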
1080 const BYTE* limit = ip + (dictEnd-match); in LZ4_compress_generic_validated()
1101 …U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) … in LZ4_compress_generic_validated()
1102 ip -= matchCode - newMatchCode; in LZ4_compress_generic_validated()
1112 DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip)); in LZ4_compress_generic_validated()
1115 LZ4_clearHash(h, cctx->hashTable, tableType); in LZ4_compress_generic_validated()
1125 matchCode -= ML_MASK; in LZ4_compress_generic_validated()
1130 matchCode -= 4*255; in LZ4_compress_generic_validated()
1146 LZ4_putPosition(ip-2, cctx->hashTable, tableType, base); in LZ4_compress_generic_validated()
1151 match = LZ4_getPosition(ip, cctx->hashTable, tableType, base); in LZ4_compress_generic_validated()
1152 LZ4_putPosition(ip, cctx->hashTable, tableType, base); in LZ4_compress_generic_validated()
1160 U32 const current = (U32)(ip-base); in LZ4_compress_generic_validated()
1161 U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); in LZ4_compress_generic_validated()
1166 matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); in LZ4_compress_generic_validated()
1185 LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); in LZ4_compress_generic_validated()
1192 if (maybe_extMem) offset = current - matchIndex; in LZ4_compress_generic_validated()
1194 (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source)); in LZ4_compress_generic_validated()
1206 { size_t lastRun = (size_t)(iend - anchor); in LZ4_compress_generic_validated()
1208 (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) { in LZ4_compress_generic_validated()
1212 lastRun = (size_t)(olimit-op) - 1/*token*/; in LZ4_compress_generic_validated()
1213 lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/ in LZ4_compress_generic_validated()
1221 size_t accumulator = lastRun - RUN_MASK; in LZ4_compress_generic_validated()
1223 for(; accumulator >= 255 ; accumulator-=255) *op++ = 255; in LZ4_compress_generic_validated()
1234 *inputConsumed = (int) (((const char*)ip)-source); in LZ4_compress_generic_validated()
1236 result = (int)(((char*)op) - dest); in LZ4_compress_generic_validated()
1286 …LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donot… in LZ4_compress_fast_extState()
1318 LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse; in LZ4_compress_fast_extState_fastReset()
1326 if (ctx->currentOffset) { in LZ4_compress_fast_extState_fastReset()
1340 if (ctx->currentOffset) { in LZ4_compress_fast_extState_fastReset()
1358 … LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ in LZ4_compress_fast()
1391 …return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDs… in LZ4_compress_destSize_extState()
1394 …return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDs… in LZ4_compress_destSize_extState()
1402 …LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly… in LZ4_compress_destSize()
1419 /*-******************************
1437 return sizeof(t_a) - sizeof(LZ4_stream_t); in LZ4_stream_t_alignment()
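The t_a above is the classic alignment-probe struct: the padding the compiler inserts between the leading char and the LZ4_stream_t member equals the type's alignment. In lz4.c it is declared as:

    typedef struct { char c; LZ4_stream_t t; } t_a;
    /* alignment of LZ4_stream_t == sizeof(t_a) - sizeof(LZ4_stream_t) */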
1462 LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32); in LZ4_resetStream_fast()
1477 LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse; in LZ4_loadDict()
1497 dict->currentOffset += 64 KB; in LZ4_loadDict()
1503 if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; in LZ4_loadDict()
1504 base = dictEnd - dict->currentOffset; in LZ4_loadDict()
1505 dict->dictionary = p; in LZ4_loadDict()
1506 dict->dictSize = (U32)(dictEnd - p); in LZ4_loadDict()
1507 dict->tableType = (U32)tableType; in LZ4_loadDict()
1509 while (p <= dictEnd-HASH_UNIT) { in LZ4_loadDict()
1510 LZ4_putPosition(p, dict->hashTable, tableType, base); in LZ4_loadDict()
1514 return (int)dict->dictSize; in LZ4_loadDict()
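Taken together, LZ4_loadDict() primes a stream's hash table so subsequent blocks can reference the dictionary. A minimal usage sketch against the public lz4.h API (compress_with_dict is a hypothetical wrapper; error handling elided):

    #include "lz4.h"

    /* Compress one block against a preset dictionary. Returns the compressed
     * size, or 0 on failure (as LZ4_compress_fast_continue does). */
    int compress_with_dict(const char* dict, int dictSize,
                           const char* src, int srcSize,
                           char* dst, int dstCapacity)
    {
        LZ4_stream_t stream;
        LZ4_initStream(&stream, sizeof(stream));
        LZ4_loadDict(&stream, dict, dictSize);
        return LZ4_compress_fast_continue(&stream, src, dst,
                                          srcSize, dstCapacity, 1 /* acceleration */);
    }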
1519 &(dictionaryStream->internal_donotuse); in LZ4_attach_dictionary()
1523 dictCtx != NULL ? dictCtx->dictSize : 0); in LZ4_attach_dictionary()
1529 * to bump the offset to something non-zero. in LZ4_attach_dictionary()
1531 if (workingStream->internal_donotuse.currentOffset == 0) { in LZ4_attach_dictionary()
1532 workingStream->internal_donotuse.currentOffset = 64 KB; in LZ4_attach_dictionary()
1537 if (dictCtx->dictSize == 0) { in LZ4_attach_dictionary()
1541 workingStream->internal_donotuse.dictCtx = dictCtx; in LZ4_attach_dictionary()
1548 …if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow… in LZ4_renormDictT()
1550 U32 const delta = LZ4_dict->currentOffset - 64 KB; in LZ4_renormDictT()
1551 const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; in LZ4_renormDictT()
1555 if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0; in LZ4_renormDictT()
1556 else LZ4_dict->hashTable[i] -= delta; in LZ4_renormDictT()
1558 LZ4_dict->currentOffset = 64 KB; in LZ4_renormDictT()
1559 if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB; in LZ4_renormDictT()
1560 LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize; in LZ4_renormDictT()
1571 LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse; in LZ4_compress_fast_continue()
1572 const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize; in LZ4_compress_fast_continue()
1581 if ( (streamPtr->dictSize-1 < 4-1) /* intentional underflow */ in LZ4_compress_fast_continue()
1583 …s_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary… in LZ4_compress_fast_continue()
1584 streamPtr->dictSize = 0; in LZ4_compress_fast_continue()
1585 streamPtr->dictionary = (const BYTE*)source; in LZ4_compress_fast_continue()
1591 if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) { in LZ4_compress_fast_continue()
1592 streamPtr->dictSize = (U32)(dictEnd - sourceEnd); in LZ4_compress_fast_continue()
1593 if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB; in LZ4_compress_fast_continue()
1594 if (streamPtr->dictSize < 4) streamPtr->dictSize = 0; in LZ4_compress_fast_continue()
1595 streamPtr->dictionary = dictEnd - streamPtr->dictSize; in LZ4_compress_fast_continue()
1601 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) in LZ4_compress_fast_continue()
1609 if (streamPtr->dictCtx) { in LZ4_compress_fast_continue()
1612 * to offsets between dictCtx->currentOffset - 64 KB and in LZ4_compress_fast_continue()
1613 * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe in LZ4_compress_fast_continue()
1621 LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr)); in LZ4_compress_fast_continue()
1627 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { in LZ4_compress_fast_continue()
1633 streamPtr->dictionary = (const BYTE*)source; in LZ4_compress_fast_continue()
1634 streamPtr->dictSize = (U32)inputSize; in LZ4_compress_fast_continue()
1640 /* Hidden debug function, to force-test external dictionary mode */
1643 LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse; in LZ4_compress_forceExtDict()
1648 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { in LZ4_compress_forceExtDict()
1654 streamPtr->dictionary = (const BYTE*)source; in LZ4_compress_forceExtDict()
1655 streamPtr->dictSize = (U32)srcSize; in LZ4_compress_forceExtDict()
1670 LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; in LZ4_saveDict()
1671 const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize; in LZ4_saveDict()
1674 if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; } in LZ4_saveDict()
1678 memmove(safeBuffer, previousDictEnd - dictSize, dictSize); in LZ4_saveDict()
1680 dict->dictionary = (const BYTE*)safeBuffer; in LZ4_saveDict()
1681 dict->dictSize = (U32)dictSize; in LZ4_saveDict()
1688 /*-*******************************
1698 /* Read the variable-length literal or match length.
1700 * ip - pointer to use as input.
1701 * lencheck - end ip. Return an error if ip advances >= lencheck.
1702 * loop_check - check ip >= lencheck in body of loop. Returns loop_error if so.
1703 * initial_check - check ip >= lencheck before start of loop. Returns initial_error if so.
1704 * error (output) - error code. Should be set to 0 before call.
1706 typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
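A simplified reference for read_variable_length() with the documented contract, but without the separate loop_check/initial_check compile-time switches (read_varlen is an illustrative name):

    /* Sum length bytes while they read 255; stop at the first byte < 255.
     * Sets *error nonzero if ip would advance to or past lencheck. */
    static size_t read_varlen(const unsigned char** ip, const unsigned char* lencheck,
                              int* error)
    {
        size_t length = 0;
        unsigned s;
        do {
            if (*ip >= lencheck) { *error = 1; return length; }
            s = **ip; (*ip)++;
            length += s;
        } while (s == 255);
        *error = 0;
        return length;
    }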
1752 if (src == NULL) { return -1; } in LZ4_decompress_generic()
1768 const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/; in LZ4_decompress_generic()
1769 const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/; in LZ4_decompress_generic()
1784 return ((srcSize==1) && (*ip==0)) ? 0 : -1; in LZ4_decompress_generic()
1786 if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); } in LZ4_decompress_generic()
1787 if ((endOnInput) && unlikely(srcSize==0)) { return -1; } in LZ4_decompress_generic()
1791 if ((oend - op) < FASTLOOP_SAFE_DISTANCE) { in LZ4_decompress_generic()
1796 /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */ in LZ4_decompress_generic()
1799 assert(oend - op >= FASTLOOP_SAFE_DISTANCE); in LZ4_decompress_generic()
1809 … length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error); in LZ4_decompress_generic()
1818 if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; } in LZ4_decompress_generic()
1821 if (cpy>oend-8) { goto safe_literal_copy; } in LZ4_decompress_generic()
1823 … * it doesn't know input length, and only relies on end-of-block properties */ in LZ4_decompress_generic()
1829 DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length); in LZ4_decompress_generic()
1831 … if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; } in LZ4_decompress_generic()
1836 * it doesn't know input length, and relies on end-of-block properties */ in LZ4_decompress_generic()
1845 match = op - offset; in LZ4_decompress_generic()
1854 … length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error); in LZ4_decompress_generic()
1858 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { in LZ4_decompress_generic()
1863 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { in LZ4_decompress_generic()
1884 if (unlikely(op+length > oend-LASTLITERALS)) { in LZ4_decompress_generic()
1887 length = MIN(length, (size_t)(oend-op)); in LZ4_decompress_generic()
1889 goto _output_error; /* end-of-block condition violated */ in LZ4_decompress_generic()
1892 if (length <= (size_t)(lowPrefix-match)) { in LZ4_decompress_generic()
1894 memmove(op, dictEnd - (lowPrefix-match), length); in LZ4_decompress_generic()
1898 size_t const copySize = (size_t)(lowPrefix - match); in LZ4_decompress_generic()
1899 size_t const restSize = length - copySize; in LZ4_decompress_generic()
1900 LZ4_memcpy(op, dictEnd - copySize, copySize); in LZ4_decompress_generic()
1902 if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ in LZ4_decompress_generic()
1916 assert((op <= oend) && (oend-op >= 32)); in LZ4_decompress_generic()
1935 /* A two-stage shortcut for the most common case: in LZ4_decompress_generic()
1945 /* strictly "less than" on input, to re-enter the loop with at least one byte */ in LZ4_decompress_generic()
1955 match = op - offset; in LZ4_decompress_generic()
1979 … length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error); in LZ4_decompress_generic()
1991 if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) ) in LZ4_decompress_generic()
1992 || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) ) in LZ4_decompress_generic()
2006 … DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op)); in LZ4_decompress_generic()
2007 … DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip)); in LZ4_decompress_generic()
2012 length = (size_t)(iend-ip); in LZ4_decompress_generic()
2021 length = (size_t)(oend-op); in LZ4_decompress_generic()
2038 …p, ip, length); /* supports overlapping memory regions; only matters for in-place decompression s… in LZ4_decompress_generic()
2046 if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) { in LZ4_decompress_generic()
2056 match = op - offset; in LZ4_decompress_generic()
2064 … length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error); in LZ4_decompress_generic()
2076 if (unlikely(op+length > oend-LASTLITERALS)) { in LZ4_decompress_generic()
2077 if (partialDecoding) length = MIN(length, (size_t)(oend-op)); in LZ4_decompress_generic()
2081 if (length <= (size_t)(lowPrefix-match)) { in LZ4_decompress_generic()
2083 memmove(op, dictEnd - (lowPrefix-match), length); in LZ4_decompress_generic()
2087 size_t const copySize = (size_t)(lowPrefix - match); in LZ4_decompress_generic()
2088 size_t const restSize = length - copySize; in LZ4_decompress_generic()
2089 LZ4_memcpy(op, dictEnd - copySize, copySize); in LZ4_decompress_generic()
2091 if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ in LZ4_decompress_generic()
2108 if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { in LZ4_decompress_generic()
2109 size_t const mlen = MIN(length, (size_t)(oend-op)); in LZ4_decompress_generic()
2130 match -= dec64table[offset]; in LZ4_decompress_generic()
2137 if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { in LZ4_decompress_generic()
2138 BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1); in LZ4_decompress_generic()
2139 …if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be li… in LZ4_decompress_generic()
2142 match += oCopyLimit - op; in LZ4_decompress_generic()
2155 DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst)); in LZ4_decompress_generic()
2156 return (int) (((char*)op)-dst); /* Nb of output bytes decoded */ in LZ4_decompress_generic()
2158 return (int) (((const char*)ip)-src); /* Nb of input bytes read */ in LZ4_decompress_generic()
2163 return (int) (-(((const char*)ip)-src))-1; in LZ4_decompress_generic()
2192 (BYTE*)dest - 64 KB, NULL, 0); in LZ4_decompress_fast()
2202 (BYTE*)dest - 64 KB, NULL, 0); in LZ4_decompress_safe_withPrefix64k()
2219 (BYTE*)dest-prefixSize, NULL, 0); in LZ4_decompress_safe_withSmallPrefix()
2251 (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); in LZ4_decompress_safe_doubleDict()
2260 (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); in LZ4_decompress_fast_doubleDict()
2287 LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; in LZ4_setStreamDecode()
2288 lz4sd->prefixSize = (size_t) dictSize; in LZ4_setStreamDecode()
2289 lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize; in LZ4_setStreamDecode()
2290 lz4sd->externalDict = NULL; in LZ4_setStreamDecode()
2291 lz4sd->extDictSize = 0; in LZ4_setStreamDecode()
2324 LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; in LZ4_decompress_safe_continue()
2327 if (lz4sd->prefixSize == 0) { in LZ4_decompress_safe_continue()
2329 assert(lz4sd->extDictSize == 0); in LZ4_decompress_safe_continue()
2332 lz4sd->prefixSize = (size_t)result; in LZ4_decompress_safe_continue()
2333 lz4sd->prefixEnd = (BYTE*)dest + result; in LZ4_decompress_safe_continue()
2334 } else if (lz4sd->prefixEnd == (BYTE*)dest) { in LZ4_decompress_safe_continue()
2336 if (lz4sd->prefixSize >= 64 KB - 1) in LZ4_decompress_safe_continue()
2338 else if (lz4sd->extDictSize == 0) in LZ4_decompress_safe_continue()
2340 lz4sd->prefixSize); in LZ4_decompress_safe_continue()
2343 … lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); in LZ4_decompress_safe_continue()
2345 lz4sd->prefixSize += (size_t)result; in LZ4_decompress_safe_continue()
2346 lz4sd->prefixEnd += result; in LZ4_decompress_safe_continue()
2349 lz4sd->extDictSize = lz4sd->prefixSize; in LZ4_decompress_safe_continue()
2350 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; in LZ4_decompress_safe_continue()
2352 lz4sd->externalDict, lz4sd->extDictSize); in LZ4_decompress_safe_continue()
2354 lz4sd->prefixSize = (size_t)result; in LZ4_decompress_safe_continue()
2355 lz4sd->prefixEnd = (BYTE*)dest + result; in LZ4_decompress_safe_continue()
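The continuation logic above is what makes block-streaming decompression work: the tracker remembers where the previous decoded data lives so each new block can reference it. A minimal usage sketch with the public API (decode_stream_block is a hypothetical wrapper; it assumes dst retains the previous 64 KB of decoded data, per the usual streaming contract):

    #include "lz4.h"

    /* Decode one block of a stream. One-time setup elsewhere:
     * LZ4_setStreamDecode(sd, NULL, 0); or pass a dictionary. */
    int decode_stream_block(LZ4_streamDecode_t* sd,
                            const char* src, int srcSize,
                            char* dst, int dstCapacity)
    {
        return LZ4_decompress_safe_continue(sd, src, dst, srcSize, dstCapacity);
    }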
2364 LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; in LZ4_decompress_fast_continue()
2368 if (lz4sd->prefixSize == 0) { in LZ4_decompress_fast_continue()
2369 assert(lz4sd->extDictSize == 0); in LZ4_decompress_fast_continue()
2372 lz4sd->prefixSize = (size_t)originalSize; in LZ4_decompress_fast_continue()
2373 lz4sd->prefixEnd = (BYTE*)dest + originalSize; in LZ4_decompress_fast_continue()
2374 } else if (lz4sd->prefixEnd == (BYTE*)dest) { in LZ4_decompress_fast_continue()
2375 if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0) in LZ4_decompress_fast_continue()
2379 … lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); in LZ4_decompress_fast_continue()
2381 lz4sd->prefixSize += (size_t)originalSize; in LZ4_decompress_fast_continue()
2382 lz4sd->prefixEnd += originalSize; in LZ4_decompress_fast_continue()
2384 lz4sd->extDictSize = lz4sd->prefixSize; in LZ4_decompress_fast_continue()
2385 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; in LZ4_decompress_fast_continue()
2387 lz4sd->externalDict, lz4sd->extDictSize); in LZ4_decompress_fast_continue()
2389 lz4sd->prefixSize = (size_t)originalSize; in LZ4_decompress_fast_continue()
2390 lz4sd->prefixEnd = (BYTE*)dest + originalSize; in LZ4_decompress_fast_continue()
2409 if (dictSize >= 64 KB - 1) { in LZ4_decompress_safe_usingDict()
2460 - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
2461 - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
2491 /* avoid const char * -> char * conversion warning */ in LZ4_slideInputBuffer()
2492 return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary; in LZ4_slideInputBuffer()