/Linux-v4.19/arch/x86/crypto/

aesni-intel_avx-x86_64.S
    291  .macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5
    298  vpclmulqdq $0x11, \HK, \GH, \T1 # T1 = a1*b1
    302  vpxor \T1, \T2,\T2 # T2 = a0*b1+a1*b0
    307  vpxor \T2, \T1, \T1 # <T1:GH> = GH x HK
    332  vpxor \T1, \GH, \GH # the result is in GH
    337  .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6
    342  vpshufd $0b01001110, \T5, \T1
    343  vpxor \T5, \T1, \T1
    344  vmovdqa \T1, HashKey_k(arg1)
    346  GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
    [all …]
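The GHASH_MUL_AVX lines above are a 128 x 128-bit carry-less multiply in GF(2^128): the comments name the partial products a1*b1 and a0*b1 + a1*b0 of the schoolbook split. A minimal C sketch of that split follows; clmul64() is a portable bit-by-bit stand-in for one VPCLMULQDQ lane, written here purely for illustration, and the final reduction modulo the GHASH polynomial that the macro performs afterwards is only noted, not implemented.

#include <stdint.h>

struct u128 { uint64_t lo, hi; };

/* Portable stand-in for one VPCLMULQDQ lane: 64x64 -> 128-bit
 * carry-less (GF(2)) multiplication.  Illustration only. */
static struct u128 clmul64(uint64_t a, uint64_t b)
{
        struct u128 r = { 0, 0 };
        int i;

        for (i = 0; i < 64; i++) {
                if ((b >> i) & 1) {
                        r.lo ^= a << i;
                        if (i)
                                r.hi ^= a >> (64 - i);
                }
        }
        return r;
}

/* Schoolbook split used by GHASH_MUL_AVX: with GH = a1:a0 and HK = b1:b0,
 * the 256-bit product is a1*b1 : (a0*b1 ^ a1*b0) : a0*b0.  The real macro
 * then reduces <hi:lo> modulo the GHASH polynomial; that step is omitted. */
static void ghash_mul_sketch(const struct u128 *gh, const struct u128 *hk,
                             struct u128 *lo, struct u128 *hi)
{
        struct u128 t1 = clmul64(gh->hi, hk->hi);          /* T1 = a1*b1 */
        struct u128 t0 = clmul64(gh->lo, hk->lo);          /* a0*b0      */
        struct u128 m0 = clmul64(gh->hi, hk->lo);          /* a1*b0      */
        struct u128 m1 = clmul64(gh->lo, hk->hi);          /* a0*b1      */
        struct u128 t2 = { m0.lo ^ m1.lo, m0.hi ^ m1.hi }; /* T2 = a0*b1+a1*b0 */

        lo->lo = t0.lo;
        lo->hi = t0.hi ^ t2.lo;
        hi->lo = t1.lo ^ t2.hi;
        hi->hi = t1.hi;
}

The PRECOMPUTE_AVX lines store derived keys such as HashKey^2<<1 mod poly so that several input blocks can be folded together before a single reduction.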
sha512-avx2-asm.S
     95  T1 = %r12 # clobbers CTX2  [define]
    192  rorx $34, a, T1 # T1 = a >> 34 # S0B
    204  xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
    205  rorx $28, a, T1 # T1 = (a >> 28) # S0
    208  xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
    209  mov a, T1 # T1 = a # MAJB
    210  and c, T1 # T1 = a&c # MAJB
    213  or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
    256  rorx $34, a, T1 # T1 = a >> 34 # S0B
    268  xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
    [all …]
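In the sha512-avx2-asm.S excerpt the rorx/xor chain evaluates the SHA-512 Σ0 function of the working variable a (the ">>" in the comments are rotates), and the mov/and/or lines assemble Maj(a,b,c). A plain-C sketch of the two quantities, using the FIPS 180-4 rotation amounts 28, 34 and 39 that the rorx instructions use:

#include <stdint.h>

static inline uint64_t rotr64(uint64_t x, unsigned int n)
{
        return (x >> n) | (x << (64 - n));
}

/* Sigma0(a) for SHA-512: the "S0" steps above. */
static inline uint64_t sha512_Sigma0(uint64_t a)
{
        return rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39);
}

/* Maj(a,b,c), written the way the comment on line 213 spells it. */
static inline uint64_t sha512_Maj(uint64_t a, uint64_t b, uint64_t c)
{
        return ((a | c) & b) | (a & c);
}

The assembly interleaves these steps with the rest of the round purely for instruction scheduling; the values computed are the same.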
sha256-avx2-asm.S
    110  T1 = %r12d  [define]
    167  rorx $13, a, T1 # T1 = a >> 13 # S0B
    181  xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
    182  rorx $2, a, T1 # T1 = (a >> 2) # S0
    186  xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
    187  mov a, T1 # T1 = a # MAJB
    188  and c, T1 # T1 = a&c # MAJB
    192  or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
    217  rorx $13, a, T1 # T1 = a >> 13 # S0B
    230  xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
    [all …]
ghash-clmulni-intel_asm.S
     30  #define T1 %xmm2  [macro]
     51  movaps DATA, T1
     58  PCLMULQDQ 0x11 SHASH T1 # T1 = a1 * b1
     61  pxor T1, T2 # T2 = a0 * b1 + a1 * b0
     67  pxor T2, T1 # <T1:DATA> is result of
     81  pxor T3, T1
     90  pxor T2, T1
     91  pxor T1, DATA
sha512-avx-asm.S
     62  T1 = %rcx  [define]
    128  mov f_64, T1 # T1 = f
    130  xor g_64, T1 # T1 = f ^ g
    132  and e_64, T1 # T1 = (f ^ g) & e
    134  xor g_64, T1 # T1 = ((f ^ g) & e) ^ g = CH(e,f,g)
    136  add WK_2(idx), T1 # W[t] + K[t] from message scheduler
    140  add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
    142  add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
    151  add T1, d_64 # e(next_state) = d + T1
    154  lea (T1, T2), h_64 # a(next_state) = T1 + Maj(a,b,c)
    [all …]
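Taken together, the sha512-avx-asm.S comments trace one SHA-512 round: T1 accumulates h + CH(e,f,g) + W[t] + K[t] + S1(e), then d gains T1 and the new a is T1 plus S0(a) + Maj(a,b,c). A compact C version of the same data flow is below; a sketch only, since the assembly processes two rounds per iteration and renames registers instead of rotating an array.

#include <stdint.h>

static inline uint64_t rotr64(uint64_t x, unsigned int n)
{
        return (x >> n) | (x << (64 - n));
}

/* One SHA-512 round; wk is W[t] + K[t], as delivered by the message
 * scheduler in the assembly version. */
static void sha512_round(uint64_t s[8], uint64_t wk)
{
        uint64_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint64_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint64_t ch  = ((f ^ g) & e) ^ g;                       /* CH(e,f,g)  */
        uint64_t S1  = rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);
        uint64_t S0  = rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39);
        uint64_t maj = ((a | c) & b) | (a & c);                 /* Maj(a,b,c) */

        uint64_t t1 = h + ch + wk + S1;  /* T1 = CH + W[t] + K[t] + h + S1(e) */
        uint64_t t2 = S0 + maj;

        s[7] = g;  s[6] = f;  s[5] = e;
        s[4] = d + t1;                   /* e(next_state) = d + T1            */
        s[3] = c;  s[2] = b;  s[1] = a;
        s[0] = t1 + t2;                  /* a(next_state) = T1 + S0(a) + Maj  */
}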
sha512-ssse3-asm.S
     61  T1 = %rcx  [define]
    121  mov f_64, T1 # T1 = f
    123  xor g_64, T1 # T1 = f ^ g
    125  and e_64, T1 # T1 = (f ^ g) & e
    127  xor g_64, T1 # T1 = ((f ^ g) & e) ^ g = CH(e,f,g)
    129  add WK_2(idx), T1 # W[t] + K[t] from message scheduler
    133  add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
    135  add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
    144  add T1, d_64 # e(next_state) = d + T1
    147  lea (T1, T2), h_64 # a(next_state) = T1 + Maj(a,b,c)
    [all …]
aegis128-aesni-asm.S
     23  #define T1 %xmm7  [macro]
    196  movdqu (%rdx), T1
    200  pxor KEY, T1
    201  movdqa T1, STATE0
    213  aegis128_update; pxor T1, STATE3
    215  aegis128_update; pxor T1, STATE1
    217  aegis128_update; pxor T1, STATE4
    219  aegis128_update; pxor T1, STATE2
    221  aegis128_update; pxor T1, STATE0
    391  movdqa \s2, T1
    [all …]
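In the aegis128-aesni-asm.S excerpt, T1 holds the IV: it seeds STATE0 (XORed with the key) and is then folded back into the rotating state during the initialisation updates. The sketch below follows the published AEGIS-128 state-update function; aes_round() is an assumed, undefined primitive standing in for a single AESENC (one AES round XORed with a round key), so the fragment compiles but does not link on its own.

#include <stdint.h>
#include <string.h>

struct aegis_block { uint8_t b[16]; };

/* Assumed primitive: one AES encryption round on *in, XORed with *rk.
 * On x86 this is what the AESENC instruction computes. */
void aes_round(const struct aegis_block *in, const struct aegis_block *rk,
               struct aegis_block *out);

static void xor_block(struct aegis_block *dst, const struct aegis_block *src)
{
        int i;

        for (i = 0; i < 16; i++)
                dst->b[i] ^= src->b[i];
}

/* AEGIS-128 state update: S'[i] = AESRound(S[i-1], S[i]) for the five
 * state words, with the message block folded into the new S[0]. */
static void aegis128_update_sketch(struct aegis_block s[5],
                                   const struct aegis_block *msg)
{
        struct aegis_block n[5];

        aes_round(&s[4], &s[0], &n[0]);
        aes_round(&s[0], &s[1], &n[1]);
        aes_round(&s[1], &s[2], &n[2]);
        aes_round(&s[2], &s[3], &n[3]);
        aes_round(&s[3], &s[4], &n[4]);
        xor_block(&n[0], msg);          /* message, or key/IV during init */

        memcpy(s, n, sizeof(n));
}

The assembly avoids the copy into n[] by letting the register that plays the role of S0 rotate from one update to the next, which is why the pxor of T1 lands in a different STATEn register each time.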
sha1_ssse3_asm.S
    200  .set T1, REG_T1  [define]
    219  mov \c, T1
    220  SWAP_REG_NAMES \c, T1
    221  xor \d, T1
    222  and \b, T1
    223  xor \d, T1
    227  mov \d, T1
    228  SWAP_REG_NAMES \d, T1
    229  xor \c, T1
    230  xor \b, T1
    [all …]
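The two sha1_ssse3_asm.S sequences are SHA-1's round-dependent f functions: lines 219-223 build the "choose" function ((c ^ d) & b) ^ d used in rounds 0-19, and lines 227-230 the parity b ^ c ^ d used in rounds 20-39 and 60-79. In C:

#include <stdint.h>

/* f for SHA-1 rounds 0..19 ("choose"), as built on lines 219-223. */
static inline uint32_t sha1_f1(uint32_t b, uint32_t c, uint32_t d)
{
        return ((c ^ d) & b) ^ d;
}

/* f for rounds 20..39 and 60..79 ("parity"), lines 227-230. */
static inline uint32_t sha1_f2(uint32_t b, uint32_t c, uint32_t d)
{
        return b ^ c ^ d;
}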
morus640-sse2-asm.S
     30  #define T1 %xmm7  [macro]
    356  pshufd $MASK3, STATE1, T1
    357  pxor T1, T0
    358  movdqa STATE2, T1
    359  pand STATE3, T1
    360  pxor T1, T0
    376  pshufd $MASK3, STATE1, T1
    377  pxor T1, T0
    378  movdqa STATE2, T1
    379  pand STATE3, T1
    [all …]
morus1280-avx2-asm.S
     33  #define T1 %ymm7  [macro]
    375  vpermq $MASK3, STATE1, T1
    376  vpxor T1, T0, T0
    377  vpand STATE2, STATE3, T1
    378  vpxor T1, T0, T0
    394  vpermq $MASK3, STATE1, T1
    395  vpxor T1, T0, T0
    396  vpand STATE2, STATE3, T1
    397  vpxor T1, T0, T0
    439  vpermq $MASK3, STATE1, T1
    [all …]
aegis256-aesni-asm.S
     23  #define T1 %xmm8  [macro]
    263  movdqa 0x10(%rsi), T1
    265  movdqa T1, STATE5
    271  pxor T1, T3
    283  update1 T1
    287  update5 T1
    291  update3 T1
    295  update1 T1
    650  movdqa .Laegis256_counter, T1
    651  pcmpgtb T1, T0
sha1_avx2_x86_64_asm.S
    117  .set T1, REG_T1  [define]
    360  andn D, TB, T1
    362  xor T1, TB
    385  andn C, A, T1 /* ~b&d */
    398  xor T1, A /* F1 = (b&c) ^ (~b&d) */
    431  mov B, T1
    432  or A, T1
    440  and C, T1
    442  or T1, A
aegis128l-aesni-asm.S
     26  #define T1 %xmm11  [macro]
    152  movdqa T1, T0
    507  movdqa MSG1, T1
    508  crypt\i T0, T1
    510  movdq\a T1, (\i * 0x20 + 0x10)(DST)
    638  movdqa MSG1, T1
    639  crypt0 T0, T1
    758  movdqa MSG1, T1
    767  movdqa T0, T1
    771  pcmpgtb T3, T1
    [all …]
/Linux-v4.19/arch/arm/crypto/

ghash-ce-core.S
     15  T1 .req q1
    136  vmull.p64 T1, XL_L, MASK
    139  vext.8 T1, T1, T1, #8
    141  veor T1, T1, XL
    154  vshl.i64 T1, XL, #57
    156  veor T1, T1, T2
    158  veor T1, T1, T2
    162  vshr.u64 T1, XL, #1
    164  veor XL, XL, T1
    165  vshr.u64 T1, T1, #6
    [all …]
sha256-armv4.pl
     53  $T1="r3"; $t3="r3";
    292  my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
    316  &vext_8 ($T1,@X[2],@X[3],4); # X[9..12]
    323  &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += X[9..12]
    326  &vshr_u32 ($T1,$T0,$sigma0[2]);
    335  &veor ($T1,$T1,$T2);
    344  &veor ($T1,$T1,$T3); # sigma0(X[1..4])
    353  &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4])
    484  vld1.32 {$T1},[$Ktbl,:128]!
    497  vadd.i32 $T1,$T1,@X[1]
    [all …]
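The perl-generated NEON code in sha256-armv4.pl is the SHA-256 message schedule: "X[0..3] += X[9..12]" is the W[t-7] term and "sigma0(X[1..4])" the σ0(W[t-15]) term of the standard recurrence, computed four lanes at a time. A scalar C sketch of one element of that recurrence:

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned int n)
{
        return (x >> n) | (x << (32 - n));
}

static inline uint32_t sigma0(uint32_t x)       /* small sigma0 */
{
        return rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3);
}

static inline uint32_t sigma1(uint32_t x)       /* small sigma1 */
{
        return rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10);
}

/* W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16], for t >= 16 */
static uint32_t sha256_schedule(const uint32_t W[], int t)
{
        return sigma1(W[t - 2]) + W[t - 7] + sigma0(W[t - 15]) + W[t - 16];
}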
sha512-armv4.pl
    507  my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31)); # temps
    533  vadd.i64 $T1,$Ch,$h
    536  vadd.i64 $T1,$t2
    543  vadd.i64 $T1,$K
    546  vadd.i64 $d,$T1
    547  vadd.i64 $Maj,$T1
/Linux-v4.19/arch/arm64/crypto/

ghash-ce-core.S
     16  T1 .req v2
    154  trn2 T1.2d, SHASH.2d, HH.2d
    155  eor SHASH2.16b, SHASH2.16b, T1.16b
    158  trn2 T1.2d, HH3.2d, HH4.2d
    159  eor HH34.16b, HH34.16b, T1.16b
    177  movi T1.8b, #8
    179  eor perm1.16b, perm1.16b, T1.16b
    182  ushr T1.2d, perm1.2d, #24
    185  sli T1.2d, perm1.2d, #40
    191  tbl sh4.16b, {SHASH.16b}, T1.16b
    [all …]
sha512-armv8.pl
    109  my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
    164  ror $T1,@X[($j+1)&15],#$sigma0[0]
    171  eor $T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
    179  eor $T1,$T1,@X[($j+1)&15],lsr#$sigma0[2] // sigma0(X[i+1])
    188  add @X[$j],@X[$j],$T1
    463  my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19));
    495  &ushr_32 ($T1,$T0,$sigma0[2]);
    505  &eor_8 ($T1,$T1,$T2);
    514  &eor_8 ($T1,$T1,$T3); # sigma0(X[1..4])
    526  &add_32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4])
    [all …]
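The scalar arm64 lines (ror, ror, lsr, "// sigma0(X[i+1])") evaluate the 64-bit message-schedule sigmas of SHA-512, whose rotation/shift amounts are 1, 8, 7 for σ0 and 19, 61, 6 for σ1; the later q-register lines are the vectorised schedule in the same script. In C the two helpers are:

#include <stdint.h>

static inline uint64_t rotr64(uint64_t x, unsigned int n)
{
        return (x >> n) | (x << (64 - n));
}

static inline uint64_t sha512_sigma0(uint64_t x)
{
        return rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7);
}

static inline uint64_t sha512_sigma1(uint64_t x)
{
        return rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6);
}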
/Linux-v4.19/arch/sparc/crypto/

aes_asm.S
      7  #define ENCRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \  [argument]
      9  AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
     10  AES_EROUND01(KEY_BASE + 4, T0, T1, I0) \
     11  AES_EROUND23(KEY_BASE + 6, T0, T1, I1)
     13  #define ENCRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \  [argument]
     15  AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
     18  AES_EROUND01(KEY_BASE + 4, T0, T1, I0) \
     19  AES_EROUND23(KEY_BASE + 6, T0, T1, I1) \
     23  #define ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE, I0, I1, T0, T1) \  [argument]
     25  AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
    [all …]
/Linux-v4.19/drivers/block/drbd/

drbd_state.h
     40  #define NS2(T1, S1, T2, S2) \  [argument]
     41  ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
     43  ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
     45  #define NS3(T1, S1, T2, S2, T3, S3) \  [argument]
     46  ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
     48  ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
     53  #define _NS2(D, T1, S1, T2, S2) \  [argument]
     54  D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T1 = (S1); \
     56  #define _NS3(D, T1, S1, T2, S2, T3, S3) \  [argument]
     57  D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T1 = (S1); \
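The truncated NS2()/NS3() macros each produce a pair of union drbd_state values, built in GCC statement expressions so the pair can be passed straight to a state-change request: a mask naming which fields the request touches and a val carrying their new values (the _NS variants start from drbd_read_state() instead of zero). The toy below shows the shape of that expansion; the union, field widths and mask constants are simplified stand-ins, not the real DRBD definitions.

#include <stdint.h>

/* Simplified stand-in for union drbd_state: two fields are enough to
 * show how NS2(conn, ..., disk, ...) composes a mask/val pair. */
union demo_state {
        struct {
                unsigned conn : 5;
                unsigned disk : 4;
        };
        uint32_t i;
};

#define conn_MASK 0x1f          /* all-ones value of the conn field */
#define disk_MASK 0x0f          /* all-ones value of the disk field */

#define DEMO_NS2(T1, S1, T2, S2, mask, val)                     \
        do {                                                    \
                (mask).i = 0; (val).i = 0;                      \
                (mask).T1 = T1##_MASK; (val).T1 = (S1);         \
                (mask).T2 = T2##_MASK; (val).T2 = (S2);         \
        } while (0)

/* usage:
 *      union demo_state m, v;
 *      DEMO_NS2(conn, 10, disk, 8, m, v);
 * only the conn and disk fields are then constrained by the request. */

DRBD then applies such a pair roughly as new.i = (old.i & ~mask.i) | val.i, so fields not named in the macro call are left alone.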
/Linux-v4.19/crypto/

anubis.c
    119  static const u32 T1[256] = {  [variable]
    539  inter[i] ^= T1[(kappa[j--] >> 16) & 0xff];  [in anubis_setkey()]
    565  T1[T4[(v >> 16) & 0xff] & 0xff] ^  [in anubis_setkey()]
    597  T1[(state[1] >> 24) ] ^  [in anubis_crypt()]
    603  T1[(state[1] >> 16) & 0xff] ^  [in anubis_crypt()]
    609  T1[(state[1] >> 8) & 0xff] ^  [in anubis_crypt()]
    615  T1[(state[1] ) & 0xff] ^  [in anubis_crypt()]
    631  (T1[(state[1] >> 24) ] & 0x00ff0000U) ^  [in anubis_crypt()]
    637  (T1[(state[1] >> 16) & 0xff] & 0x00ff0000U) ^  [in anubis_crypt()]
    643  (T1[(state[1] >> 8) & 0xff] & 0x00ff0000U) ^  [in anubis_crypt()]
    [all …]
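In anubis.c, T1 is one of four 256-entry lookup tables that combine the S-box with one column of the diffusion layer. The quoted anubis_crypt() lines show the round structure: output word i XORs one lookup per state word, each indexed by that word's i-th byte, plus a round-key word, and the last round masks every lookup down to a single byte lane so only the S-box output survives. A stand-alone sketch of the inner-round structure (the tables are passed as parameters here; in anubis.c they are the static T0..T3 arrays):

#include <stdint.h>

static void anubis_round_sketch(uint32_t inter[4], const uint32_t state[4],
                                const uint32_t key[4],
                                const uint32_t *T0, const uint32_t *T1,
                                const uint32_t *T2, const uint32_t *T3)
{
        int i;

        for (i = 0; i < 4; i++) {
                int sh = 24 - 8 * i;            /* byte column i */

                inter[i] = T0[(state[0] >> sh) & 0xff] ^
                           T1[(state[1] >> sh) & 0xff] ^
                           T2[(state[2] >> sh) & 0xff] ^
                           T3[(state[3] >> sh) & 0xff] ^
                           key[i];
        }
        /* the final round instead keeps one byte lane per lookup, e.g.
         * (T1[...] & 0x00ff0000U), which drops the diffusion layer. */
}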
khazad.c
    127  static const u64 T1[256] = {  [variable]
    772  T1[(int)(K1 >> 48) & 0xff] ^  [in khazad_setkey()]
    788  T1[(int)S[(int)(K1 >> 48) & 0xff] & 0xff] ^  [in khazad_setkey()]
    814  T1[(int)(state >> 48) & 0xff] ^  [in khazad_crypt()]
    825  (T1[(int)(state >> 48) & 0xff] & 0x00ff000000000000ULL) ^  [in khazad_crypt()]
/Linux-v4.19/drivers/isdn/hardware/avm/

Kconfig
     31  tristate "AVM T1/T1-B ISA support"
     34  Enable support for the AVM T1 T1B card.
     51  tristate "AVM T1/T1-B PCI support"
     54  Enable support for the AVM T1 T1B card.
/Linux-v4.19/arch/mips/mm/

page.c
     46  #define T1 9  [macro]
    477  build_copy_load(&buf, T1, off + copy_word_size);  [in build_copy_page()]
    485  build_copy_store(&buf, T1, off + copy_word_size);  [in build_copy_page()]
    499  build_copy_load(&buf, T1, off + copy_word_size);  [in build_copy_page()]
    507  build_copy_store(&buf, T1, off + copy_word_size);  [in build_copy_page()]
    524  build_copy_load(&buf, T1, off + copy_word_size);  [in build_copy_page()]
    530  build_copy_store(&buf, T1, off + copy_word_size);  [in build_copy_page()]
    542  build_copy_load(&buf, T1, off + copy_word_size);  [in build_copy_page()]
    548  build_copy_store(&buf, T1, off + copy_word_size);  [in build_copy_page()]
    566  build_copy_load(&buf, T1, off + copy_word_size);  [in build_copy_page()]
    [all …]
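page.c builds copy_page() at boot time with the MIPS micro-assembler (uasm); T1 is simply CPU register $9 and carries the second word of every load/store pair, hence the "off + copy_word_size" offset in each quoted call. The generated loop behaves roughly like the C sketch below; a sketch only, since the real builder unrolls further and also emits prefetch/cache operations where the CPU supports them.

#include <stddef.h>

/* Equivalent C shape of the generated copy: two registers (T0 and T1
 * above) carry alternating words between paired loads and stores. */
static void copy_page_sketch(unsigned long *dst, const unsigned long *src,
                             size_t nwords)
{
        size_t off;

        for (off = 0; off + 1 < nwords; off += 2) {
                unsigned long t0 = src[off];
                unsigned long t1 = src[off + 1];    /* "off + copy_word_size" */

                dst[off]     = t0;
                dst[off + 1] = t1;
        }
}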
/Linux-v4.19/arch/mips/kvm/

entry.c
     32  #define T1 9  [macro]
     39  #define T1 13  [macro]
    347  uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,  [in kvm_mips_build_enter_guest()]
    349  uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,  [in kvm_mips_build_enter_guest()]
    365  UASM_i_ADDIU(&p, T1, S0,  [in kvm_mips_build_enter_guest()]
    375  UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,  [in kvm_mips_build_enter_guest()]
    378  UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,  [in kvm_mips_build_enter_guest()]
    388  UASM_i_ADDU(&p, T3, T1, T2);  [in kvm_mips_build_enter_guest()]
    414  (int)offsetof(struct mm_struct, context.asid), T1);  [in kvm_mips_build_enter_guest()]