
Searched full:partial (Results 1 – 25 of 1256) sorted by relevance


/Linux-v6.6/fs/minix/
itree_common.c
158 Indirect *partial; in get_block() local
166 partial = get_branch(inode, depth, offsets, chain, &err); in get_block()
169 if (!partial) { in get_block()
173 partial = chain+depth-1; /* the whole chain */ in get_block()
180 while (partial > chain) { in get_block()
181 brelse(partial->bh); in get_block()
182 partial--; in get_block()
196 left = (chain + depth) - partial; in get_block()
197 err = alloc_branch(inode, left, offsets+(partial-chain), partial); in get_block()
201 if (splice_branch(inode, chain, partial, left) < 0) in get_block()
[all …]
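The minix walk above, and the near-identical sysv, ext2, and ext4 walks below, follow one contract: the branch reader returns NULL when the whole chain of indirect blocks is already mapped, or a pointer ('partial') to the last triple it could verify. Two invariants fall out of that, sketched in the self-contained toy below (simplified stand-in types, not the kernel's): every triple above the chain head pins a buffer that must be released on the way out, and the number of levels still to allocate on a miss is (chain + depth) - partial.

#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel types in the snippets. */
struct buffer_head { int refcount; };

typedef struct {
	unsigned int key;        /* block number stored in this slot */
	struct buffer_head *bh;  /* buffer pinned while walking      */
} Indirect;

static void brelse(struct buffer_head *bh)	/* stub release */
{
	if (bh)
		bh->refcount--;
}

/*
 * The cleanup loop from the snippets: chain[0] lives in the inode
 * (no buffer), every deeper triple pins one, so unwind back to front.
 */
static void release_partial_chain(Indirect *chain, Indirect *partial)
{
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
}

int main(void)
{
	struct buffer_head bh1 = { 1 }, bh2 = { 1 };
	Indirect chain[3] = { { 7, NULL }, { 8, &bh1 }, { 9, &bh2 } };
	int depth = 3;
	Indirect *partial = chain + depth - 1;	/* "the whole chain" */

	/* Levels left to allocate on a miss, as passed to alloc_branch(). */
	assert((chain + depth) - partial == 1);

	release_partial_chain(chain, partial);
	assert(bh1.refcount == 0 && bh2.refcount == 0);
	return 0;
}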
/Linux-v6.6/include/crypto/
sha1_base.h
41 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; in sha1_base_do_update() local
45 if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) { in sha1_base_do_update()
48 if (partial) { in sha1_base_do_update()
49 int p = SHA1_BLOCK_SIZE - partial; in sha1_base_do_update()
51 memcpy(sctx->buffer + partial, data, p); in sha1_base_do_update()
65 partial = 0; in sha1_base_do_update()
68 memcpy(sctx->buffer + partial, data, len); in sha1_base_do_update()
79 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; in sha1_base_do_finalize() local
81 sctx->buffer[partial++] = 0x80; in sha1_base_do_finalize()
82 if (partial > bit_offset) { in sha1_base_do_finalize()
[all …]
sm3_base.h
44 unsigned int partial = sctx->count % SM3_BLOCK_SIZE; in sm3_base_do_update() local
48 if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) { in sm3_base_do_update()
51 if (partial) { in sm3_base_do_update()
52 int p = SM3_BLOCK_SIZE - partial; in sm3_base_do_update()
54 memcpy(sctx->buffer + partial, data, p); in sm3_base_do_update()
68 partial = 0; in sm3_base_do_update()
71 memcpy(sctx->buffer + partial, data, len); in sm3_base_do_update()
82 unsigned int partial = sctx->count % SM3_BLOCK_SIZE; in sm3_base_do_finalize() local
84 sctx->buffer[partial++] = 0x80; in sm3_base_do_finalize()
85 if (partial > bit_offset) { in sm3_base_do_finalize()
[all …]
sha256_base.h
42 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; in lib_sha256_base_do_update() local
46 if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) { in lib_sha256_base_do_update()
49 if (partial) { in lib_sha256_base_do_update()
50 int p = SHA256_BLOCK_SIZE - partial; in lib_sha256_base_do_update()
52 memcpy(sctx->buf + partial, data, p); in lib_sha256_base_do_update()
66 partial = 0; in lib_sha256_base_do_update()
69 memcpy(sctx->buf + partial, data, len); in lib_sha256_base_do_update()
89 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; in lib_sha256_base_do_finalize() local
91 sctx->buf[partial++] = 0x80; in lib_sha256_base_do_finalize()
92 if (partial > bit_offset) { in lib_sha256_base_do_finalize()
[all …]
sha512_base.h
62 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; in sha512_base_do_update() local
68 if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) { in sha512_base_do_update()
71 if (partial) { in sha512_base_do_update()
72 int p = SHA512_BLOCK_SIZE - partial; in sha512_base_do_update()
74 memcpy(sctx->buf + partial, data, p); in sha512_base_do_update()
88 partial = 0; in sha512_base_do_update()
91 memcpy(sctx->buf + partial, data, len); in sha512_base_do_update()
102 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; in sha512_base_do_finalize() local
104 sctx->buf[partial++] = 0x80; in sha512_base_do_finalize()
105 if (partial > bit_offset) { in sha512_base_do_finalize()
[all …]
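sha1_base.h, sm3_base.h, sha256_base.h, and sha512_base.h above differ essentially only in block size and counter width (sha512 reads count[0] because its counter is 128 bits); the update logic is one pattern: top up the stashed partial block, stream whole blocks straight from the caller, stash the tail. A minimal userspace rendering of that pattern, with hypothetical names and a stubbed block transform:

#include <string.h>
#include <stdio.h>

#define BLOCK_SIZE 64			/* SHA-1/SM3/SHA-256 block size */

struct hash_ctx {
	unsigned long long count;	/* total bytes fed so far */
	unsigned char buffer[BLOCK_SIZE];
};

static unsigned int blocks_run;	/* stand-in for the block transform */
static void process_blocks(struct hash_ctx *ctx,
			   const unsigned char *data, unsigned int nblocks)
{
	(void)ctx; (void)data;
	blocks_run += nblocks;
}

static void hash_update(struct hash_ctx *ctx,
			const unsigned char *data, unsigned int len)
{
	/* Bytes already stashed: total count modulo the block size. */
	unsigned int partial = ctx->count % BLOCK_SIZE;

	ctx->count += len;

	if (partial + len >= BLOCK_SIZE) {
		if (partial) {
			/* Top the stash up to one full block first. */
			unsigned int p = BLOCK_SIZE - partial;

			memcpy(ctx->buffer + partial, data, p);
			process_blocks(ctx, ctx->buffer, 1);
			data += p;
			len -= p;
		}
		/* Stream all complete blocks straight from the caller. */
		process_blocks(ctx, data, len / BLOCK_SIZE);
		data += (len / BLOCK_SIZE) * BLOCK_SIZE;
		len %= BLOCK_SIZE;
		partial = 0;
	}
	/* Stash the tail for the next call (or for finalize). */
	memcpy(ctx->buffer + partial, data, len);
}

int main(void)
{
	struct hash_ctx ctx = { 0 };
	unsigned char msg[200] = { 0 };

	hash_update(&ctx, msg, 10);	/* stashes 10 bytes       */
	hash_update(&ctx, msg, 190);	/* 3 blocks + 8-byte tail */
	printf("%u blocks, %llu byte tail\n",
	       blocks_run, ctx.count % BLOCK_SIZE);	/* 3 blocks, 8 */
	return 0;
}

The matching do_finalize() helpers above then append 0x80 to the stash, zero-pad up to bit_offset, and store the message length in bits; the "partial > bit_offset" test decides whether the length still fits in the current block or an extra padding-only block must be hashed first.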
/Linux-v6.6/fs/sysv/
itree.c
213 Indirect *partial; in get_block() local
222 partial = get_branch(inode, depth, offsets, chain, &err); in get_block()
226 if (!partial) { in get_block()
231 partial = chain+depth-1; /* the whole chain */ in get_block()
238 while (partial > chain) { in get_block()
239 brelse(partial->bh); in get_block()
240 partial--; in get_block()
254 left = (chain + depth) - partial; in get_block()
255 err = alloc_branch(inode, left, offsets+(partial-chain), partial); in get_block()
259 if (splice_branch(inode, chain, partial, left) < 0) in get_block()
[all …]
/Linux-v6.6/fs/ext4/
indirect.c
244 * @partial: pointer to the last triple within a chain
252 Indirect *partial) in ext4_find_goal() argument
260 goal = ext4_find_near(inode, partial); in ext4_find_goal()
316 * we had read the existing part of chain and partial points to the last
538 Indirect *partial; in ext4_ind_map_blocks() local
554 partial = ext4_get_branch(inode, depth, offsets, chain, &err); in ext4_ind_map_blocks()
557 if (!partial) { in ext4_ind_map_blocks()
580 * Count number blocks in a subtree under 'partial'. At each in ext4_ind_map_blocks()
586 for (i = partial - chain + 1; i < depth; i++) in ext4_ind_map_blocks()
620 ar.goal = ext4_find_goal(inode, map->m_lblk, partial); in ext4_ind_map_blocks()
[all …]
/Linux-v6.6/include/linux/
slub_def.h
21 FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
23 ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
38 CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
39 CPU_PARTIAL_FREE, /* Refill cpu partial on free */
40 CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
41 CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
60 struct slab *partial; /* Partially allocated frozen slabs */ member
70 #define slub_percpu_partial(c) ((c)->partial)
102 /* Used for retrieving partial slabs, etc. */
110 /* Number of per cpu partial objects to keep around */
[all …]
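The slub_def.h entries above all orbit one idea: a slab that is neither empty nor full sits on a "partial" list (per-CPU first, per-node as fallback) so an allocation can reuse an existing slab before creating a new one, and the CPU_PARTIAL_* counters track traffic between the two lists. A toy model of that bookkeeping, under those assumptions (none of these names are the kernel's):

#include <stdio.h>
#include <stddef.h>

struct toy_slab {
	int objects_free;		/* objects still available        */
	struct toy_slab *next;		/* singly linked, like c->partial */
};

struct toy_cache {
	struct toy_slab *cpu_partial;	/* fast path, this CPU only */
	struct toy_slab *node_partial;	/* shared fallback pool     */
};

/* ALLOC_FROM_PARTIAL-style path: prefer an existing partial slab. */
static struct toy_slab *get_partial(struct toy_cache *c)
{
	struct toy_slab **list =
		c->cpu_partial ? &c->cpu_partial : &c->node_partial;
	struct toy_slab *s = *list;

	if (s)
		*list = s->next;	/* pop the list head */
	return s;			/* NULL: caller makes a new slab */
}

/* FREE_ADD_PARTIAL-style path: a free that un-fills a slab parks it. */
static void put_partial(struct toy_cache *c, struct toy_slab *s)
{
	s->next = c->cpu_partial;
	c->cpu_partial = s;
}

int main(void)
{
	struct toy_slab a = { 3, NULL };
	struct toy_cache cache = { NULL, NULL };

	put_partial(&cache, &a);
	printf("reused slab with %d free objects\n",
	       get_partial(&cache)->objects_free);
	return 0;
}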
/Linux-v6.6/drivers/crypto/
padlock-sha.c
284 unsigned int partial, done; in padlock_sha1_update_nano() local
291 partial = sctx->count & 0x3f; in padlock_sha1_update_nano()
297 if ((partial + len) >= SHA1_BLOCK_SIZE) { in padlock_sha1_update_nano()
300 if (partial) { in padlock_sha1_update_nano()
301 done = -partial; in padlock_sha1_update_nano()
302 memcpy(sctx->buffer + partial, data, in padlock_sha1_update_nano()
321 partial = 0; in padlock_sha1_update_nano()
324 memcpy(sctx->buffer + partial, src, len - done); in padlock_sha1_update_nano()
332 unsigned int partial, padlen; in padlock_sha1_final_nano() local
339 partial = state->count & 0x3f; in padlock_sha1_final_nano()
[all …]
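padlock-sha.c here, and the powerpc and octeon update routines below, use a terser variant of the same buffering: 'done' starts at -partial, which wraps because it is unsigned, so 'done + 64' equals the 64 - partial bytes needed to fill the stash, and 'data + done' later indexes the caller's buffer as though the stashed bytes had come from it. A standalone demo of just that idiom (hypothetical names, stubbed transform):

#include <stdio.h>
#include <string.h>

#define BLK 64

static unsigned char buffer[BLK];	/* stash for a partial block */
static unsigned int blocks_run;

static void transform(const unsigned char *src)	/* stub */
{
	(void)src;
	blocks_run++;
}

static unsigned int update(const unsigned char *data, unsigned int len,
			   unsigned int partial)
{
	unsigned int done = 0;
	const unsigned char *src = data;

	if (partial + len >= BLK) {
		if (partial) {
			done = -partial;	/* wraps to a huge value */
			/* done + BLK == BLK - partial: top up the stash */
			memcpy(buffer + partial, data, done + BLK);
			src = buffer;		/* first block is the stash */
		}
		do {
			transform(src);
			done += BLK;	/* BLK - partial, 2*BLK - partial, ... */
			src = data + done;	/* back on caller's data */
		} while (done + BLK - 1 < len);
		partial = 0;
	}
	memcpy(buffer + partial, src, len - done);	/* restash tail */
	return len - done + partial;	/* bytes now pending */
}

int main(void)
{
	unsigned char data[200] = { 0 };

	/* 13 pending + 200 new = 213 bytes: 3 blocks, 21 restashed. */
	printf("%u pending, %u blocks\n",
	       update(data, sizeof data, 13), blocks_run);
	return 0;
}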
/Linux-v6.6/arch/arm64/crypto/
sha3-ce-glue.c
43 if ((sctx->partial + len) >= sctx->rsiz) { in sha3_update()
46 if (sctx->partial) { in sha3_update()
47 int p = sctx->rsiz - sctx->partial; in sha3_update()
49 memcpy(sctx->buf + sctx->partial, data, p); in sha3_update()
56 sctx->partial = 0; in sha3_update()
75 memcpy(sctx->buf + sctx->partial, data, len); in sha3_update()
76 sctx->partial += len; in sha3_update()
91 sctx->buf[sctx->partial++] = 0x06; in sha3_final()
92 memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial); in sha3_final()
/Linux-v6.6/arch/powerpc/crypto/
sha1.c
30 unsigned int partial, done; in powerpc_sha1_update() local
33 partial = sctx->count & 0x3f; in powerpc_sha1_update()
38 if ((partial + len) > 63) { in powerpc_sha1_update()
40 if (partial) { in powerpc_sha1_update()
41 done = -partial; in powerpc_sha1_update()
42 memcpy(sctx->buffer + partial, data, done + 64); in powerpc_sha1_update()
52 partial = 0; in powerpc_sha1_update()
54 memcpy(sctx->buffer + partial, src, len - done); in powerpc_sha1_update()
/Linux-v6.6/fs/ext2/
inode.c
325 * @partial: pointer to the last triple within a chain
331 Indirect *partial) in ext2_find_goal() argument
346 return ext2_find_near(inode, partial); in ext2_find_goal()
466 * we had read the existing part of chain and partial points to the last
632 Indirect *partial; in ext2_get_blocks() local
648 partial = ext2_get_branch(inode, depth, offsets, chain, &err); in ext2_get_blocks()
650 if (!partial) { in ext2_get_blocks()
666 partial = chain + depth - 1; in ext2_get_blocks()
696 if (err == -EAGAIN || !verify_chain(chain, partial)) { in ext2_get_blocks()
697 while (partial > chain) { in ext2_get_blocks()
[all …]
/Linux-v6.6/arch/sparc/crypto/
sha1_glue.c
31 unsigned int len, unsigned int partial) in __sha1_sparc64_update() argument
36 if (partial) { in __sha1_sparc64_update()
37 done = SHA1_BLOCK_SIZE - partial; in __sha1_sparc64_update()
38 memcpy(sctx->buffer + partial, data, done); in __sha1_sparc64_update()
55 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; in sha1_sparc64_update() local
58 if (partial + len < SHA1_BLOCK_SIZE) { in sha1_sparc64_update()
60 memcpy(sctx->buffer + partial, data, len); in sha1_sparc64_update()
62 __sha1_sparc64_update(sctx, data, len, partial); in sha1_sparc64_update()
sha256_glue.c
31 unsigned int len, unsigned int partial) in __sha256_sparc64_update() argument
36 if (partial) { in __sha256_sparc64_update()
37 done = SHA256_BLOCK_SIZE - partial; in __sha256_sparc64_update()
38 memcpy(sctx->buf + partial, data, done); in __sha256_sparc64_update()
55 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; in sha256_sparc64_update() local
58 if (partial + len < SHA256_BLOCK_SIZE) { in sha256_sparc64_update()
60 memcpy(sctx->buf + partial, data, len); in sha256_sparc64_update()
62 __sha256_sparc64_update(sctx, data, len, partial); in sha256_sparc64_update()
sha512_glue.c
30 unsigned int len, unsigned int partial) in __sha512_sparc64_update() argument
36 if (partial) { in __sha512_sparc64_update()
37 done = SHA512_BLOCK_SIZE - partial; in __sha512_sparc64_update()
38 memcpy(sctx->buf + partial, data, done); in __sha512_sparc64_update()
55 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; in sha512_sparc64_update() local
58 if (partial + len < SHA512_BLOCK_SIZE) { in sha512_sparc64_update()
61 memcpy(sctx->buf + partial, data, len); in sha512_sparc64_update()
63 __sha512_sparc64_update(sctx, data, len, partial); in sha512_sparc64_update()
md5_glue.c
47 unsigned int len, unsigned int partial) in __md5_sparc64_update() argument
52 if (partial) { in __md5_sparc64_update()
53 done = MD5_HMAC_BLOCK_SIZE - partial; in __md5_sparc64_update()
54 memcpy((u8 *)sctx->block + partial, data, done); in __md5_sparc64_update()
71 unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; in md5_sparc64_update() local
74 if (partial + len < MD5_HMAC_BLOCK_SIZE) { in md5_sparc64_update()
76 memcpy((u8 *)sctx->block + partial, data, len); in md5_sparc64_update()
78 __md5_sparc64_update(sctx, data, len, partial); in md5_sparc64_update()
/Linux-v6.6/Documentation/devicetree/bindings/fpga/
fpga-region.txt
18 FPGA Regions represent FPGA's and partial reconfiguration regions of FPGA's in
34 Partial Reconfiguration (PR)
39 Partial Reconfiguration Region (PRR)
51 * Also called a "partial bit stream"
64 * During Partial Reconfiguration of a specific region, that region's bridge
79 * A base image may set up a set of partial reconfiguration regions that may
150 For partial reconfiguration (PR), each PR region will have an FPGA Region.
185 - partial-fpga-config : boolean, set if partial reconfiguration is to be done,
297 * Partial reconfiguration with bridges in the FPGA
301 region while the buses are enabled for other sections. Before any partial
[all …]
/Linux-v6.6/Documentation/ABI/testing/
sysfs-kernel-slab
95 allocation from a partial or new slab. It can be written to
178 The deactivate_to_head file shows how many times a partial cpu
179 slab was deactivated and added to the head of its node's partial
189 The deactivate_to_tail file shows how many times a partial cpu
190 slab was deactivated and added to the tail of its node's partial
211 partial list. It can be written to clear the current count.
254 its node's partial list. It can be written to clear the current
276 using the slow path (i.e. to a full or partial slab). It can
296 remain on a node's partial list to avoid the overhead of
325 objects are on partial slabs and from which nodes they are
[all …]
/Linux-v6.6/drivers/crypto/stm32/
stm32-crc32.c
69 u32 partial; /* crc32c: partial in first 4 bytes of that struct */ member
136 /* Store partial result */ in stm32_crc_init()
137 ctx->partial = readl_relaxed(crc->regs + CRC_DR); in stm32_crc_init()
163 ctx->partial = crc32_le(ctx->partial, d8, length); in burst_update()
165 ctx->partial = __crc32c_le(ctx->partial, d8, length); in burst_update()
176 writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT); in burst_update()
205 /* Store partial result */ in burst_update()
206 ctx->partial = readl_relaxed(crc->regs + CRC_DR); in burst_update()
249 ~ctx->partial : ctx->partial, out); in stm32_crc_final()
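stm32-crc32.c keeps the running CRC in ctx->partial so each update can resume where the previous one stopped, either by reloading the hardware INIT register (the bitrev32 write above) or by handing the value to the software crc32 helpers. The chaining property it depends on is easy to verify in isolation; below is a bitwise reference CRC-32 with zlib-style inversion, not the kernel API:

#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC-32 (reflected polynomial), zlib-style chaining. */
static uint32_t crc32_update(uint32_t partial, const uint8_t *d, int n)
{
	partial = ~partial;		/* undo the final inversion */
	while (n--) {
		partial ^= *d++;
		for (int k = 0; k < 8; k++)
			partial = (partial >> 1) ^
				  (0xEDB88320u & -(partial & 1));
	}
	return ~partial;
}

int main(void)
{
	const uint8_t msg[] = "123456789";
	uint32_t one_shot = crc32_update(0, msg, 9);

	/* Same bytes in two chunks, resumed via the stored partial. */
	uint32_t partial = crc32_update(0, msg, 4);
	uint32_t chained = crc32_update(partial, msg + 4, 5);

	/* Both print cbf43926, the standard CRC-32 check value. */
	printf("%08x %08x\n", one_shot, chained);
	return 0;
}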
/Linux-v6.6/drivers/iommu/
io-pgfault.c
33 * @partial: faults that are part of a Page Request Group for which the last
40 struct list_head partial; member
175 list_add(&iopf->list, &iopf_param->partial); in iommu_queue_iopf()
184 * need to clean up before leaving, otherwise partial faults in iommu_queue_iopf()
197 /* See if we have partial faults for this group */ in iommu_queue_iopf()
198 list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { in iommu_queue_iopf()
208 list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { in iommu_queue_iopf()
252 * iopf_queue_discard_partial - Remove all pending partial fault
253 * @queue: the queue whose partial faults need to be discarded
256 * lost and the IOMMU driver calls this to discard all partial faults. The
[all …]
/Linux-v6.6/arch/mips/cavium-octeon/crypto/
octeon-sha1.c
78 unsigned int partial; in __octeon_sha1_update() local
82 partial = sctx->count % SHA1_BLOCK_SIZE; in __octeon_sha1_update()
87 if ((partial + len) >= SHA1_BLOCK_SIZE) { in __octeon_sha1_update()
88 if (partial) { in __octeon_sha1_update()
89 done = -partial; in __octeon_sha1_update()
90 memcpy(sctx->buffer + partial, data, in __octeon_sha1_update()
101 partial = 0; in __octeon_sha1_update()
103 memcpy(sctx->buffer + partial, src, len - done); in __octeon_sha1_update()
octeon-sha256.c
70 unsigned int partial; in __octeon_sha256_update() local
74 partial = sctx->count % SHA256_BLOCK_SIZE; in __octeon_sha256_update()
79 if ((partial + len) >= SHA256_BLOCK_SIZE) { in __octeon_sha256_update()
80 if (partial) { in __octeon_sha256_update()
81 done = -partial; in __octeon_sha256_update()
82 memcpy(sctx->buf + partial, data, in __octeon_sha256_update()
93 partial = 0; in __octeon_sha256_update()
95 memcpy(sctx->buf + partial, src, len - done); in __octeon_sha256_update()
/Linux-v6.6/arch/x86/include/asm/
unwind.h
69 * If 'partial' returns true, only the iret frame registers are valid.
72 bool *partial) in unwind_get_entry_regs() argument
77 if (partial) { in unwind_get_entry_regs()
79 *partial = !state->full_regs; in unwind_get_entry_regs()
81 *partial = false; in unwind_get_entry_regs()
89 bool *partial) in unwind_get_entry_regs() argument
/Linux-v6.6/Documentation/driver-api/md/
raid5-ppl.rst
2 Partial Parity Log
5 Partial Parity Log (PPL) is a feature available for RAID5 arrays. The issue
15 Partial parity for a write operation is the XOR of stripe data chunks not
17 write hole. XORing partial parity with the modified chunks produces parity for
26 When handling a write request PPL writes partial parity before new data and
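The identity raid5-ppl.rst states in prose is small enough to check directly: with stripe parity P = D0 ^ D1 ^ ... ^ Dn, the partial parity of a write is the XOR of the chunks the write leaves untouched, and XORing it with the modified chunks' new contents reproduces the full parity, which is what lets the log entry close the write hole. Single-byte chunks for brevity:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t d[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint8_t parity = d[0] ^ d[1] ^ d[2] ^ d[3];

	/* Write will modify d[1]: partial parity skips that chunk. */
	uint8_t pp = d[0] ^ d[2] ^ d[3];
	assert(pp == (parity ^ d[1]));	/* same as old parity ^ old data */

	uint8_t d1_new = 0xAB;
	uint8_t new_parity = pp ^ d1_new;	/* recover full parity */

	assert(new_parity == (d[0] ^ d1_new ^ d[2] ^ d[3]));
	return 0;
}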
/Linux-v6.6/tools/mm/
slabinfo.c
36 unsigned long partial, objects, slabs, objects_partial, objects_total; member
128 "-P|--partial Sort by number of partial slabs\n" in usage()
152 "\nSorting options (--Loss, --Size, --Partial) are mutually exclusive\n" in usage()
416 printf("%-21s ", "Partial slabs"); in slab_numa()
500 printf("Add partial %8lu %8lu %3lu %3lu\n", in slab_stats()
505 printf("Remove partial %8lu %8lu %3lu %3lu\n", in slab_stats()
510 printf("Cpu partial list %8lu %8lu %3lu %3lu\n", in slab_stats()
535 printf("Moved to head of partial list %7lu %3lu%%\n", in slab_stats()
537 printf("Moved to tail of partial list %7lu %3lu%%\n", in slab_stats()
576 s->slab_size, s->slabs - s->partial - s->cpu_slabs, in report()
[all …]
