/Linux-v6.6/lib/zstd/compress/ |
D | zstd_double_fast.c |
      26  const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;   in ZSTD_fillDoubleHashTable() local
      33  for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {   in ZSTD_fillDoubleHashTable()
      67  const BYTE* const iend = istart + srcSize;   in ZSTD_compressBlock_doubleFast_noDict_generic() local
      68  const BYTE* const ilimit = iend - HASH_READ_SIZE;   in ZSTD_compressBlock_doubleFast_noDict_generic()
     132  mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;   in ZSTD_compressBlock_doubleFast_noDict_generic()
     134  … ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);   in ZSTD_compressBlock_doubleFast_noDict_generic()
     143  mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;   in ZSTD_compressBlock_doubleFast_noDict_generic()
     183  return (size_t)(iend - anchor);   in ZSTD_compressBlock_doubleFast_noDict_generic()
     191  mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;   in ZSTD_compressBlock_doubleFast_noDict_generic()
     199  mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;   in ZSTD_compressBlock_doubleFast_noDict_generic()
     [all …]
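The double-fast excerpts above all lean on the same two bounds: iend marks one past the end of the input block, and ilimit = iend - HASH_READ_SIZE keeps 8 bytes of headroom so the hashing loads never read past the block. A minimal sketch of that guard, using a hypothetical read_hash8() helper in place of the real ZSTD_hashPtr():

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef unsigned char BYTE;
    #define HASH_READ_SIZE 8

    /* Hypothetical stand-in for ZSTD_hashPtr(): hash the 8 bytes at p.
     * The caller guarantees p + 8 never goes past the input block. */
    static uint32_t read_hash8(const BYTE *p)
    {
        uint64_t v;
        memcpy(&v, p, sizeof(v));
        return (uint32_t)((v * 0x9E3779B185EBCA87ULL) >> 40);
    }

    /* Scan the block the way the fast match finders do: ip never crosses ilimit,
     * so every read_hash8(ip) stays inside [istart, iend). */
    static void scan_block_sketch(const BYTE *istart, size_t srcSize,
                                  uint32_t *hashTable, uint32_t hashMask)
    {
        const BYTE *const iend = istart + srcSize;
        const BYTE *ip = istart;
        const BYTE *ilimit;

        if (srcSize < HASH_READ_SIZE) return;     /* too small to hash safely */
        ilimit = iend - HASH_READ_SIZE;           /* headroom for the 8-byte loads */
        for (; ip <= ilimit; ip++)
            hashTable[read_hash8(ip) & hashMask] = (uint32_t)(ip - istart);
    }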
|
D | zstd_fast.c |
      25  const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;   in ZSTD_fillHashTable() local
      31  for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {   in ZSTD_fillHashTable()
     108  const BYTE* const iend = istart + srcSize;   in ZSTD_compressBlock_fast_noDict_generic() local
     109  const BYTE* const ilimit = iend - HASH_READ_SIZE;   in ZSTD_compressBlock_fast_noDict_generic()
     262  return (size_t)(iend - anchor);   in ZSTD_compressBlock_fast_noDict_generic()
     283  mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);   in ZSTD_compressBlock_fast_noDict_generic()
     285  ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);   in ZSTD_compressBlock_fast_noDict_generic()
     305  size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;   in ZSTD_compressBlock_fast_noDict_generic()
     309  ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength);   in ZSTD_compressBlock_fast_noDict_generic()
     387  const BYTE* const iend = istart + srcSize;   in ZSTD_compressBlock_fast_dictMatchState_generic() local
     [all …]
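Lines like `mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend)` extend a candidate match but never compare bytes at or beyond iend. The real ZSTD_count() works a machine word at a time; a byte-wise sketch of the same contract (hypothetical name, not the zstd implementation):

    #include <stddef.h>

    typedef unsigned char BYTE;

    /* Byte-wise stand-in for ZSTD_count(): length of the common run between pIn
     * and pMatch, with pIn never read at or past pInLimit (the block's iend). */
    static size_t count_match_sketch(const BYTE *pIn, const BYTE *pMatch,
                                     const BYTE *const pInLimit)
    {
        const BYTE *const pStart = pIn;
        while (pIn < pInLimit && *pIn == *pMatch) {
            pIn++;
            pMatch++;
        }
        return (size_t)(pIn - pStart);
    }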
|
D | zstd_ldm.c |
     240  const BYTE* const iend = (const BYTE*)end;   in ZSTD_ldm_fillFastTables() local
     245  ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);   in ZSTD_ldm_fillFastTables()
     249  ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);   in ZSTD_ldm_fillFastTables()
     269  const BYTE* iend, ldmParams_t const* params)   in ZSTD_ldm_fillHashTable() argument
     282  while (ip < iend) {   in ZSTD_ldm_fillHashTable()
     287  hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);   in ZSTD_ldm_fillHashTable()
     340  BYTE const* const iend = istart + srcSize;   in ZSTD_ldm_generateSequences_internal() local
     341  BYTE const* const ilimit = iend - HASH_READ_SIZE;   in ZSTD_ldm_generateSequences_internal()
     353  return iend - anchor;   in ZSTD_ldm_generateSequences_internal()
     414  cur->offset < dictLimit ? dictEnd : iend;   in ZSTD_ldm_generateSequences_internal()
     [all …]
|
D | zstd_lazy.c |
      21  const BYTE* ip, const BYTE* iend,   in ZSTD_updateDUBT() argument
      39  assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */   in ZSTD_updateDUBT()
      40  (void)iend;   in ZSTD_updateDUBT()
      78  const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;   in ZSTD_insertDUBT1() local
      94  assert(ip < iend); /* condition for ZSTD_count */   in ZSTD_insertDUBT1()
     113  matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);   in ZSTD_insertDUBT1()
     116  …matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);   in ZSTD_insertDUBT1()
     124  if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */   in ZSTD_insertDUBT1()
     155  const BYTE* const ip, const BYTE* const iend,   in ZSTD_DUBT_findBetterDictMatch() argument
     192  …matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);   in ZSTD_DUBT_findBetterDictMatch()
     [all …]
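The binary-tree matchers above switch to ZSTD_count_2segments() when the match candidate lives in the external-dictionary segment: the comparison first runs against dictEnd, and only if it reaches it does it continue from prefixStart, with the current-block side always capped by iend. A self-contained sketch of that two-stage count (an illustration of the idea, not the zstd implementation):

    #include <stddef.h>

    typedef unsigned char BYTE;

    /* Byte-wise match counter, capped at limit (same contract as the sketch above). */
    static size_t cnt_sketch(const BYTE *a, const BYTE *b, const BYTE *const limit)
    {
        size_t n = 0;
        while (a + n < limit && a[n] == b[n]) n++;
        return n;
    }

    /* Sketch of the idea behind ZSTD_count_2segments(): the candidate sits in the
     * ext-dict segment ending at dictEnd; if the match runs all the way to dictEnd
     * it continues at prefixStart, and the input side is always capped by iend. */
    static size_t count_2segments_sketch(const BYTE *ip, const BYTE *match,
                                         const BYTE *const iend,
                                         const BYTE *const dictEnd,
                                         const BYTE *const prefixStart)
    {
        const BYTE *const vEnd =
            (ip + (dictEnd - match) < iend) ? ip + (dictEnd - match) : iend;
        size_t matchLength = cnt_sketch(ip, match, vEnd);

        if (match + matchLength != dictEnd)
            return matchLength;           /* mismatch before the dictionary segment ends */
        /* the match consumed the whole dictionary tail: keep going in the prefix */
        return matchLength + cnt_sketch(ip + matchLength, prefixStart, iend);
    }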
|
D | zstd_opt.c |
     413  const BYTE* const ip, const BYTE* const iend,   in ZSTD_insertBt1() argument
     454  assert(ip <= iend-8); /* required for h calculation */   in ZSTD_insertBt1()
     487  matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);   in ZSTD_insertBt1()
     490  …matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);   in ZSTD_insertBt1()
     501  if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */   in ZSTD_insertBt1()
     532  const BYTE* const ip, const BYTE* const iend,   in ZSTD_updateTree_internal() argument
     542  … U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);   in ZSTD_updateTree_internal()
     547  assert((size_t)(iend - base) <= (size_t)(U32)(-1));   in ZSTD_updateTree_internal()
     551  void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {   in ZSTD_updateTree() argument
     552  ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);   in ZSTD_updateTree()
     [all …]
|
D | hist.c |
      73  const BYTE* const iend = ip+sourceSize;   in HIST_count_parallel_wksp() local
      92  while (ip < iend-15) {   in HIST_count_parallel_wksp()
     118  while (ip<iend) Counting1[*ip++]++;   in HIST_count_parallel_wksp()
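The hist.c hits show the usual two-phase histogram: a fast main loop (`while (ip < iend-15)`) that spreads increments over several counting tables so back-to-back updates rarely hit the same slot, and a scalar tail (`while (ip<iend) Counting1[*ip++]++`). A simplified 4-bytes-per-iteration sketch of the same idea (the real loop handles 16 bytes per pass):

    #include <stddef.h>
    #include <string.h>

    typedef unsigned char BYTE;

    /* Simplified sketch of HIST_count_parallel_wksp(): four counting tables,
     * merged at the end. */
    static void hist_count_sketch(unsigned count[256], const void *src, size_t srcSize)
    {
        const BYTE *ip = (const BYTE *)src;
        const BYTE *const iend = ip + srcSize;
        const BYTE *const fastEnd = (srcSize >= 4) ? iend - 3 : ip;
        unsigned c[4][256];
        int s;

        memset(c, 0, sizeof(c));
        while (ip < fastEnd) {                 /* main loop: 4 bytes per pass */
            c[0][ip[0]]++;
            c[1][ip[1]]++;
            c[2][ip[2]]++;
            c[3][ip[3]]++;
            ip += 4;
        }
        while (ip < iend) c[0][*ip++]++;       /* scalar tail, as in hist.c line 118 */

        for (s = 0; s < 256; s++)
            count[s] = c[0][s] + c[1][s] + c[2][s] + c[3][s];
    }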
|
D | zstd_compress_superblock.c |
     442  BYTE const* const iend = ip + srcSize;   in ZSTD_compressSubBlock_multi() local
     499  assert(ip + decompressedSize <= iend);   in ZSTD_compressSubBlock_multi()
     530  if (ip < iend) {   in ZSTD_compressSubBlock_multi()
     531  size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);   in ZSTD_compressSubBlock_multi()
     532  …GLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));   in ZSTD_compressSubBlock_multi()
|
D | zstd_ldm.h | 26 const BYTE* iend, ldmParams_t const* params);
|
D | zstd_opt.h | 18 void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
|
D | zstd_compress.c |
    3814  void const* iend)   in ZSTD_overflowCorrectIfNeeded() argument
    3818  … (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {   in ZSTD_overflowCorrectIfNeeded()
    4117  const BYTE* const iend = ip + srcSize;   in ZSTD_loadDictionaryContent() local
    4135  ip = iend - maxDictSize;   in ZSTD_loadDictionaryContent()
    4143  ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);   in ZSTD_loadDictionaryContent()
    4148  ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);   in ZSTD_loadDictionaryContent()
    4153  ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);   in ZSTD_loadDictionaryContent()
    4156  ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);   in ZSTD_loadDictionaryContent()
    4161  ZSTD_fillHashTable(ms, iend, dtlm);   in ZSTD_loadDictionaryContent()
    4164  ZSTD_fillDoubleHashTable(ms, iend, dtlm);   in ZSTD_loadDictionaryContent()
    [all …]
|
D | fse_compress.c |
     598  const BYTE* const iend = istart + srcSize;   in FSE_compress_usingCTable_generic() local
     599  const BYTE* ip=iend;   in FSE_compress_usingCTable_generic()
|
D | zstd_compress_internal.h |
     557  ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w)   in ZSTD_safecopyLiterals() argument
     559  assert(iend > ilimit_w);   in ZSTD_safecopyLiterals()
     565  while (ip < iend) *op++ = *ip++;   in ZSTD_safecopyLiterals()
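ZSTD_safecopyLiterals() deals with literal runs whose source sits near the end of the input: wide, possibly over-reading copies are only allowed while ip is still at or below ilimit_w, and the last bytes up to iend are moved one at a time, which is exactly the `while (ip < iend) *op++ = *ip++;` in the excerpt. A hedged sketch of that split (the chunk size and the slack assumptions are mine, not taken from the header):

    #include <string.h>

    typedef unsigned char BYTE;
    #define COPY_CHUNK 16   /* assumed wildcopy granularity */

    /* Sketch of the ZSTD_safecopyLiterals() split: chunked copies while the source
     * pointer is no further than ilimit_w (so a 16-byte load cannot run past the
     * input buffer; the destination is assumed to have matching slack), then a
     * careful byte loop up to iend. */
    static void safecopy_literals_sketch(BYTE *op, const BYTE *ip,
                                         const BYTE *const iend,     /* end of the literal run */
                                         const BYTE *const ilimit_w) /* last ip safe for a chunked read */
    {
        while (ip <= ilimit_w && ip < iend) {
            memcpy(op, ip, COPY_CHUNK);   /* may copy a few bytes past the run on purpose */
            op += COPY_CHUNK;
            ip += COPY_CHUNK;
        }
        while (ip < iend) *op++ = *ip++;  /* exact tail, never reads past iend */
    }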
|
D | huf_compress.c |
    1086  const BYTE* const iend = ip + srcSize;   in HUF_compress4X_usingCTable_internal() local
    1120  assert(ip <= iend);   in HUF_compress4X_usingCTable_internal()
    1121  …, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) …   in HUF_compress4X_usingCTable_internal()
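HUF_compress4X_usingCTable_internal() cuts the input into four streams: the first three get (srcSize+3)/4 bytes each and the last one simply runs to iend, which is where the `(size_t)(iend-ip)` in the excerpt comes from. A small sketch of just that split arithmetic (the size threshold below is only what the sketch itself needs; the real encoder applies stricter minimums):

    #include <stddef.h>

    typedef unsigned char BYTE;

    /* Sketch of the 4-stream split behind HUF_compress4X: three segments of
     * (srcSize+3)/4 bytes, then whatever is left up to iend. */
    static int huf4_split_sketch(size_t sizes[4], const void *src, size_t srcSize)
    {
        const BYTE *ip = (const BYTE *)src;
        const BYTE *const iend = ip + srcSize;
        size_t const segmentSize = (srcSize + 3) / 4;
        int i;

        if (srcSize < 9) return -1;      /* below this, 3*segmentSize could overrun the input */
        for (i = 0; i < 3; i++) {
            sizes[i] = segmentSize;
            ip += segmentSize;
        }
        sizes[3] = (size_t)(iend - ip);  /* last stream: everything remaining */
        return 0;
    }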
|
/Linux-v6.6/lib/zstd/common/ |
D | entropy_common.c |
      63  const BYTE* const iend = istart + hbSize;   in FSE_readNCount_body() local
     108  if (LIKELY(ip <= iend-7)) {   in FSE_readNCount_body()
     111  bitCount -= (int)(8 * (iend - 7 - ip));   in FSE_readNCount_body()
     113  ip = iend - 4;   in FSE_readNCount_body()
     137  if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {   in FSE_readNCount_body()
     142  bitCount -= (int)(8 * (iend - 4 - ip));   in FSE_readNCount_body()
     144  ip = iend - 4;   in FSE_readNCount_body()
     186  if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {   in FSE_readNCount_body()
     190  bitCount -= (int)(8 * (iend - 4 - ip));   in FSE_readNCount_body()
     192  ip = iend - 4;   in FSE_readNCount_body()
|
/Linux-v6.6/arch/arm64/kernel/ |
D | head.S |
     235  .macro compute_indices, vstart, vend, shift, order, istart, iend, count
     237  ubfx \iend, \vend, \shift, \order
     238  add \iend, \iend, \count, lsl \order
     239  sub \count, \iend, \istart
     262  ….macro map_memory, tbl, rtbl, vstart, vend, flags, phys, order, istart, iend, tmp, count, sv, extr…
     270  compute_indices \vstart, \vend, #\extra_shift, #(PAGE_SHIFT - 3), \istart, \iend, \count
     272  populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
     276  compute_indices \vstart, \vend, #PGDIR_SHIFT, #\order, \istart, \iend, \count
     278  populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
     282  compute_indices \vstart, \vend, #PUD_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
     [all …]
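The head.S lines build the range of page-table indices that a virtual address range spans: compute_indices extracts `order` bits of the start and end addresses at `shift`, widens the end index by the tables already accounted for (`count, lsl order`), and derives the new count as iend - istart. A C model of that arithmetic, for illustration only (the istart extraction is inferred from the macro's argument list; only the iend-related lines appear in the excerpt):

    #include <stdint.h>

    /* C model of the compute_indices macro in arch/arm64/kernel/head.S:
     * istart/iend are the first and last table index touched by [vstart, vend]
     * at one page-table level; count carries over into the next level. */
    static void compute_indices_model(uint64_t vstart, uint64_t vend,
                                      unsigned shift, unsigned order,
                                      uint64_t *istart, uint64_t *iend, uint64_t *count)
    {
        uint64_t const mask = (UINT64_C(1) << order) - 1;

        *istart = (vstart >> shift) & mask;   /* ubfx \istart, \vstart, \shift, \order (inferred) */
        *iend   = (vend   >> shift) & mask;   /* ubfx \iend, \vend, \shift, \order */
        *iend  += *count << order;            /* add  \iend, \iend, \count, lsl \order */
        *count  = *iend - *istart;            /* sub  \count, \iend, \istart */
    }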
|
/Linux-v6.6/lib/lz4/ |
D | lz4_decompress.c |
      84  const BYTE * const iend = ip + srcSize;   in LZ4_decompress_generic() local
      98  const BYTE *const shortiend = iend -   in LZ4_decompress_generic()
     131  assert(!endOnInput || ip <= iend);   in LZ4_decompress_generic()
     195  if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {   in LZ4_decompress_generic()
     203  ? ip < iend - RUN_MASK   in LZ4_decompress_generic()
     225  || (ip + length > iend - (2 + 1 + LASTLITERALS))))   in LZ4_decompress_generic()
     237  && (ip + length > iend)) {   in LZ4_decompress_generic()
     256  && ((ip + length != iend)   in LZ4_decompress_generic()
     279  if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))   in LZ4_decompress_generic()
     320  if ((endOnInput) && (ip > iend - LASTLITERALS))   in LZ4_decompress_generic()
|
D | lz4_compress.c |
     196  const BYTE * const iend = ip + inputSize;   in LZ4_compress_generic() local
     197  const BYTE * const mflimit = iend - MFLIMIT;   in LZ4_compress_generic()
     198  const BYTE * const matchlimit = iend - LASTLITERALS;   in LZ4_compress_generic()
     431  size_t const lastRun = (size_t)(iend - anchor);   in LZ4_compress_generic()
     534  const BYTE * const iend = ip + *srcSizePtr;   in LZ4_compress_destSize_generic() local
     535  const BYTE * const mflimit = iend - MFLIMIT;   in LZ4_compress_destSize_generic()
     536  const BYTE * const matchlimit = iend - LASTLITERALS;   in LZ4_compress_destSize_generic()
     690  size_t lastRunSize = (size_t)(iend - anchor);   in LZ4_compress_destSize_generic()
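In the LZ4 compressors every limit is derived from iend: mflimit (= iend - MFLIMIT) is the last position where it is still worth starting a match, matchlimit (= iend - LASTLITERALS) stops match extension early enough to leave the mandatory trailing literals, and whatever remains after the last match, iend - anchor bytes, is flushed as literals. A sketch of those relationships, assuming LZ4's usual constants and an input already known to be long enough:

    #include <stddef.h>

    typedef unsigned char BYTE;

    #define MFLIMIT      12   /* assumed: min bytes needed after a match start */
    #define LASTLITERALS  5   /* assumed: a block must end with >= 5 literals  */

    /* Guard pointers derived from iend, LZ4_compress_generic()-style.
     * The caller is assumed to have checked inputSize >= MFLIMIT already. */
    struct lz4_bounds {
        const BYTE *iend;        /* one past the last input byte          */
        const BYTE *mflimit;     /* no new match may start at/after this  */
        const BYTE *matchlimit;  /* match extension must stop here        */
    };

    static struct lz4_bounds lz4_bounds_sketch(const BYTE *ip, size_t inputSize)
    {
        struct lz4_bounds b;
        b.iend       = ip + inputSize;
        b.mflimit    = b.iend - MFLIMIT;
        b.matchlimit = b.iend - LASTLITERALS;
        return b;
    }

    /* After the final match, everything from anchor to iend is copied verbatim. */
    static size_t lz4_last_run_sketch(const BYTE *anchor, const struct lz4_bounds *b)
    {
        return (size_t)(b->iend - anchor);
    }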
|
D | lz4hc_compress.c |
     350  const BYTE * const iend = ip + inputSize;   in LZ4HC_compress_generic() local
     351  const BYTE * const mflimit = iend - MFLIMIT;   in LZ4HC_compress_generic()
     352  const BYTE * const matchlimit = (iend - LASTLITERALS);   in LZ4HC_compress_generic()
     556  int lastRun = (int)(iend - anchor);   in LZ4HC_compress_generic()
     573  LZ4_memcpy(op, anchor, iend - anchor);   in LZ4HC_compress_generic()
     574  op += iend - anchor;   in LZ4HC_compress_generic()
|
/Linux-v6.6/lib/zstd/decompress/ |
D | huf_decompress.c |
     153  BYTE const* iend[4];   member
     193  args->iend[0] = istart + 6; /* jumpTable */   in HUF_DecompressAsmArgs_init()
     194  args->iend[1] = args->iend[0] + length1;   in HUF_DecompressAsmArgs_init()
     195  args->iend[2] = args->iend[1] + length2;   in HUF_DecompressAsmArgs_init()
     196  args->iend[3] = args->iend[2] + length3;   in HUF_DecompressAsmArgs_init()
     208  args->ip[0] = args->iend[1] - sizeof(U64);   in HUF_DecompressAsmArgs_init()
     209  args->ip[1] = args->iend[2] - sizeof(U64);   in HUF_DecompressAsmArgs_init()
     210  args->ip[2] = args->iend[3] - sizeof(U64);   in HUF_DecompressAsmArgs_init()
     257  if (args->ip[stream] < args->iend[stream] - 8)   in HUF_initRemainingDStream()
     263  bit->start = (const char*)args->iend[0];   in HUF_initRemainingDStream()
     [all …]
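On the decoding side, HUF_DecompressAsmArgs_init() recovers the four stream boundaries from a 6-byte jump table at the start of the block: iend[0] sits right after the table, each following iend[i] adds that stream's length, and every bit reader starts on its stream's last 8 bytes (iend[i+1] - sizeof(U64)) because the streams are consumed backwards. A sketch of just that arithmetic; reading the jump table as three little-endian 16-bit lengths is my assumption about the layout, and the real code performs more size checks than shown here:

    #include <stddef.h>
    #include <stdint.h>

    typedef unsigned char BYTE;

    /* Sketch of the stream-boundary setup in HUF_DecompressAsmArgs_init().
     * Returns -1 if the advertised lengths do not fit in srcSize. */
    static int huf4_boundaries_sketch(const BYTE *iend[4], const BYTE *ipStart[3],
                                      const BYTE *istart, size_t srcSize)
    {
        const BYTE *const blockEnd = istart + srcSize;
        size_t length1, length2, length3;

        if (srcSize < 6) return -1;                /* no room for the jump table */
        length1 = (size_t)istart[0] | ((size_t)istart[1] << 8);
        length2 = (size_t)istart[2] | ((size_t)istart[3] << 8);
        length3 = (size_t)istart[4] | ((size_t)istart[5] << 8);

        iend[0] = istart + 6;                      /* just past the jump table */
        iend[1] = iend[0] + length1;               /* end of stream 1 */
        iend[2] = iend[1] + length2;               /* end of stream 2 */
        iend[3] = iend[2] + length3;               /* end of stream 3; stream 4 runs to blockEnd */
        if (iend[3] > blockEnd) return -1;         /* corrupted lengths */

        /* streams are decoded backwards, so each reader starts on its last 8 bytes
         * (the fourth reader, set up from blockEnd, is not shown in the excerpt) */
        ipStart[0] = iend[1] - sizeof(uint64_t);
        ipStart[1] = iend[2] - sizeof(uint64_t);
        ipStart[2] = iend[3] - sizeof(uint64_t);
        return 0;
    }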
|
D | zstd_decompress_block.c |
     657  const BYTE* const iend = istart + srcSize;   in ZSTD_decodeSeqHeaders() local
     674  RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");   in ZSTD_decodeSeqHeaders()
     678  RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");   in ZSTD_decodeSeqHeaders()
     685  …RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encod…   in ZSTD_decodeSeqHeaders()
     694  ip, iend-ip,   in ZSTD_decodeSeqHeaders()
     706  ip, iend-ip,   in ZSTD_decodeSeqHeaders()
     718  ip, iend-ip,   in ZSTD_decodeSeqHeaders()
    1329  const BYTE* const iend = ip + seqSize;   local
    1347  ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
    1546  const BYTE* const iend = ip + seqSize;   local
     [all …]
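ZSTD_decodeSeqHeaders() validates every header access against iend before reading (`RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "")` and similar), then hands the shrinking iend-ip budget down to the FSE table builders. A minimal bounds-checked read in the same spirit, using a simple negative return instead of zstd's error-code macros:

    #include <stddef.h>

    typedef unsigned char BYTE;

    /* Minimal bounds-checked header read in the style of ZSTD_decodeSeqHeaders():
     * every access is validated against iend first; on failure return -1
     * (hypothetical convention, the real code returns zstd error codes). */
    static int read_u16_checked(const BYTE **ipPtr, const BYTE *const iend, unsigned *out)
    {
        const BYTE *ip = *ipPtr;
        if (ip + 2 > iend) return -1;            /* srcSize_wrong in the real code */
        *out = (unsigned)ip[0] | ((unsigned)ip[1] << 8);
        *ipPtr = ip + 2;
        return 0;
    }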
|
D | zstd_decompress.c |
    1882  const char* const iend = input->size != 0 ? src + input->size : src;   in ZSTD_decompressStream() local
    1916  DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));   in ZSTD_decompressStream()
    1927  size_t const remainingInput = (size_t)(iend-ip);   in ZSTD_decompressStream()
    1928  assert(iend >= ip);   in ZSTD_decompressStream()
    1946  size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));   in ZSTD_decompressStream()
    1947  if (cSize <= (size_t)(iend-istart)) {   in ZSTD_decompressStream()
    2028  … size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));   in ZSTD_decompressStream()
    2035  if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */   in ZSTD_decompressStream()
    2041  if (ip==iend) { someMoreWork = 0; break; } /* no more input */   in ZSTD_decompressStream()
    2051  assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));   in ZSTD_decompressStream()
    [all …]
|
/Linux-v6.6/drivers/scsi/qla2xxx/ |
D | qla_sup.c |
    3090  uint32_t istart, iend, iter, vend;   in qla2x00_get_fcode_version() local
    3099  iend = istart + 0x100;   in qla2x00_get_fcode_version()
    3104  while ((iter < iend) && !do_next) {   in qla2x00_get_fcode_version()
|