Lines matching full:blocks

29 int rounds, int blocks);
31 int rounds, int blocks);
34 int rounds, int blocks, u8 iv[]);
37 int rounds, int blocks, u8 iv[], u8 final[]);
40 int rounds, int blocks, u8 iv[]);
42 int rounds, int blocks, u8 iv[]);
46 int rounds, int blocks);
48 int rounds, int blocks, u8 iv[]);
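These first eight matches are continuation lines of multi-line prototypes for the assembly routines, so only the trailing parameters survive in the listing. Below is a hedged reconstruction of the declaration shapes they imply, assuming the listing comes from the kernel's bit-sliced AES NEON glue code; the aesbs_* names and the leading parameters are inferred, not part of the matches:

/* assumed shape of the declarations whose tails match above;
 * function names are inferred and may differ in the actual source */
asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);
asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[], u8 final[]);
asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);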
99 int rounds, int blocks)) in __ecb_crypt() argument
109 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; in __ecb_crypt() local
112 blocks = round_down(blocks, in __ecb_crypt()
117 ctx->rounds, blocks); in __ecb_crypt()
120 walk.nbytes - blocks * AES_BLOCK_SIZE); in __ecb_crypt()
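Read together, the __ecb_crypt matches (lines 99 through 120) outline a skcipher walk loop: blocks appears once as a parameter of the function-pointer callback, once as a per-chunk local, gets rounded down to a stride multiple, is passed to the callback, and finally sizes the leftover handed back to the walker. A minimal sketch of that loop, assuming the standard skcipher_walk API; everything outside the matched fragments is reconstructed:

while (walk.nbytes >= AES_BLOCK_SIZE) {
	unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

	/* process a stride multiple of blocks unless this chunk
	 * is the last one (assumed condition) */
	if (walk.nbytes < walk.total)
		blocks = round_down(blocks,
				    walk.stride / AES_BLOCK_SIZE);

	kernel_neon_begin();	/* assumed: enter NEON context */
	fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
	   ctx->rounds, blocks);
	kernel_neon_end();

	/* hand the unprocessed tail back to the walker */
	err = skcipher_walk_done(&walk,
				 walk.nbytes - blocks * AES_BLOCK_SIZE);
}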
168 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; in cbc_encrypt() local
173 ctx->enc, ctx->key.rounds, blocks, in cbc_encrypt()
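The cbc_encrypt match at line 173 passes ctx->enc rather than the bit-sliced ctx->key.rk, which fits the usual pattern: CBC encryption chains each ciphertext block into the next, so an 8-way bit-sliced implementation cannot batch blocks, and the glue code calls a plain NEON routine instead. A hedged sketch of that call site; the helper name and the loop structure are assumptions:

while (walk.nbytes >= AES_BLOCK_SIZE) {
	unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

	/* CBC encryption is serial, so fall back to a non-bit-sliced
	 * NEON routine (assumed helper name) */
	neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			     ctx->enc, ctx->key.rounds, blocks,
			     walk.iv);
	err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}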
191 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; in cbc_decrypt() local
194 blocks = round_down(blocks, in cbc_decrypt()
199 ctx->key.rk, ctx->key.rounds, blocks, in cbc_decrypt()
203 walk.nbytes - blocks * AES_BLOCK_SIZE); in cbc_decrypt()
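cbc_decrypt (lines 191 through 203), by contrast, uses ctx->key.rk and the same round_down pattern as __ecb_crypt: every ciphertext block needed for unchaining is already in hand, so whole batches can go to the bit-sliced code with the IV carried across calls. A sketch under the same assumptions as above:

while (walk.nbytes >= AES_BLOCK_SIZE) {
	unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

	if (walk.nbytes < walk.total)
		blocks = round_down(blocks,
				    walk.stride / AES_BLOCK_SIZE);

	/* all chaining inputs are ciphertext, so blocks decrypt in
	 * parallel; walk.iv carries the chain between chunks */
	aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
			  ctx->key.rk, ctx->key.rounds, blocks,
			  walk.iv);
	err = skcipher_walk_done(&walk,
				 walk.nbytes - blocks * AES_BLOCK_SIZE);
}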
239 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; in ctr_encrypt() local
243 blocks = round_down(blocks, in ctr_encrypt()
250 ctx->rk, ctx->rounds, blocks, walk.iv, final); in ctr_encrypt()
254 u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; in ctr_encrypt()
255 u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; in ctr_encrypt()
264 walk.nbytes - blocks * AES_BLOCK_SIZE); in ctr_encrypt()
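The ctr_encrypt matches (lines 239 through 264) add a final argument and then offset dst and src past the whole blocks, which suggests the assembly deposits the keystream for a trailing partial block into a separate buffer that the C side XORs in afterwards. A sketch of that tail handling; final, buf, and the crypto_xor_cpy() step are assumptions consistent with the fragments:

/* inside the walk loop; blocks and buf are declared earlier (assumed) */
u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;

aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
		  ctx->rk, ctx->rounds, blocks, walk.iv, final);

if (final) {
	/* partial tail: XOR the plaintext remainder with the
	 * keystream block the assembly left in 'final' */
	u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
	u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;

	crypto_xor_cpy(dst, src, final, walk.total % AES_BLOCK_SIZE);
	err = skcipher_walk_done(&walk, 0);
} else {
	err = skcipher_walk_done(&walk,
				 walk.nbytes - blocks * AES_BLOCK_SIZE);
}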
319 int rounds, int blocks, u8 iv[])) in __xts_crypt() argument
357 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; in __xts_crypt() local
360 blocks = round_down(blocks, in __xts_crypt()
368 if (likely(blocks > 6)) { /* plain NEON is faster otherwise */ in __xts_crypt()
375 fn(out, in, ctx->key.rk, ctx->key.rounds, blocks, in __xts_crypt()
378 out += blocks * AES_BLOCK_SIZE; in __xts_crypt()
379 in += blocks * AES_BLOCK_SIZE; in __xts_crypt()
380 nbytes -= blocks * AES_BLOCK_SIZE; in __xts_crypt()
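Finally, the __xts_crypt matches (lines 319 through 380) show a size heuristic: the bit-sliced routine is only invoked for more than six blocks, since its 8-block batching costs more than it saves on short inputs, and out, in, and nbytes are then advanced past the processed region, presumably so a plain NEON path can finish the remainder. A sketch of that branch, with everything outside the matched lines reconstructed:

/* inside the walk loop; blocks already rounded down as above */
u8 *out = walk.dst.virt.addr;
u8 const *in = walk.src.virt.addr;
int nbytes = walk.nbytes;

if (likely(blocks > 6)) {	/* plain NEON is faster otherwise */
	fn(out, in, ctx->key.rk, ctx->key.rounds, blocks, walk.iv);

	out += blocks * AES_BLOCK_SIZE;
	in += blocks * AES_BLOCK_SIZE;
	nbytes -= blocks * AES_BLOCK_SIZE;
}
/* the remaining (< 7) blocks and any ciphertext-stealing tail are
 * assumed to be handled by non-bit-sliced NEON helpers */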