Lines Matching refs:TMP1
200 .macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
211 movdqa \TMP2, \TMP1
213 psrldq $8, \TMP1
218 pshufd $0x24, \TMP1, \TMP2
225 pshufd $78, \TMP3, \TMP1
226 pxor \TMP3, \TMP1
227 movdqu \TMP1, HashKey_k(%arg2)
229 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
233 pshufd $78, \TMP5, \TMP1
234 pxor \TMP5, \TMP1
235 movdqu \TMP1, HashKey_2_k(%arg2)
237 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
240 pshufd $78, \TMP5, \TMP1
241 pxor \TMP5, \TMP1
242 movdqu \TMP1, HashKey_3_k(%arg2)
244 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
247 pshufd $78, \TMP5, \TMP1
248 pxor \TMP5, \TMP1
249 movdqu \TMP1, HashKey_4_k(%arg2)
515 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
516 movdqa \GH, \TMP1
521 PCLMULQDQ 0x11, \HK, \TMP1 # TMP1 = a1*b1
525 pxor \TMP1, \TMP2 # TMP2 = (a0*b0)+(a1*b0)
530 pxor \TMP2, \TMP1 # TMP1:GH holds the result of GH*HK
563 pxor \TMP1, \GH # result is in GH
599 .macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
613 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
626 READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
629 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
792 .macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
804 MOVADQ ONE(%RIP),\TMP1
807 paddd \TMP1, \XMM0 # INCR Y0
822 MOVADQ (%r10),\TMP1
824 AESENC \TMP1, %xmm\index
830 MOVADQ (%r10), \TMP1
832 AESENCLAST \TMP1, %xmm\index # Last Round
835 movdqu (%arg4 , %r11, 1), \TMP1
836 pxor \TMP1, %xmm\index
842 movdqa \TMP1, %xmm\index
854 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
856 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
858 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
861 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
863 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
866 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
876 MOVADQ ONE(%RIP),\TMP1
877 paddd \TMP1, \XMM0 # INCR Y0
881 paddd \TMP1, \XMM0 # INCR Y0
885 paddd \TMP1, \XMM0 # INCR Y0
889 paddd \TMP1, \XMM0 # INCR Y0
893 MOVADQ 0(%arg1),\TMP1
894 pxor \TMP1, \XMM1
895 pxor \TMP1, \XMM2
896 pxor \TMP1, \XMM3
897 pxor \TMP1, \XMM4
899 movaps 0x10*\index(%arg1), \TMP1
900 AESENC \TMP1, \XMM1
901 AESENC \TMP1, \XMM2
902 AESENC \TMP1, \XMM3
903 AESENC \TMP1, \XMM4
906 movaps 0x10*\index(%arg1), \TMP1
907 AESENC \TMP1, \XMM1
908 AESENC \TMP1, \XMM2
909 AESENC \TMP1, \XMM3
910 AESENC \TMP1, \XMM4
933 movdqu 16*0(%arg4 , %r11 , 1), \TMP1
934 pxor \TMP1, \XMM1
937 movdqa \TMP1, \XMM1
939 movdqu 16*1(%arg4 , %r11 , 1), \TMP1
940 pxor \TMP1, \XMM2
943 movdqa \TMP1, \XMM2
945 movdqu 16*2(%arg4 , %r11 , 1), \TMP1
946 pxor \TMP1, \XMM3
949 movdqa \TMP1, \XMM3
951 movdqu 16*3(%arg4 , %r11 , 1), \TMP1
952 pxor \TMP1, \XMM4
955 movdqa \TMP1, \XMM4
981 .macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
1017 movaps 0x10(%arg1), \TMP1
1018 AESENC \TMP1, \XMM1 # Round 1
1019 AESENC \TMP1, \XMM2
1020 AESENC \TMP1, \XMM3
1021 AESENC \TMP1, \XMM4
1022 movaps 0x20(%arg1), \TMP1
1023 AESENC \TMP1, \XMM1 # Round 2
1024 AESENC \TMP1, \XMM2
1025 AESENC \TMP1, \XMM3
1026 AESENC \TMP1, \XMM4
1027 movdqa \XMM6, \TMP1
1031 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
1050 pxor \TMP1, \TMP4
1054 movdqa \XMM7, \TMP1
1061 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1080 pxor \TMP1, \TMP4
1088 movdqa \XMM8, \TMP1
1092 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1139 pxor \TMP4, \TMP1
1142 pxor \TMP1, \TMP2
1148 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5
1178 pxor \TMP1, \XMM5 # result is in XMM5
1189 .macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
1225 movaps 0x10(%arg1), \TMP1
1226 AESENC \TMP1, \XMM1 # Round 1
1227 AESENC \TMP1, \XMM2
1228 AESENC \TMP1, \XMM3
1229 AESENC \TMP1, \XMM4
1230 movaps 0x20(%arg1), \TMP1
1231 AESENC \TMP1, \XMM1 # Round 2
1232 AESENC \TMP1, \XMM2
1233 AESENC \TMP1, \XMM3
1234 AESENC \TMP1, \XMM4
1235 movdqa \XMM6, \TMP1
1239 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
1258 pxor \TMP1, \TMP4
1262 movdqa \XMM7, \TMP1
1269 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1288 pxor \TMP1, \TMP4
1296 movdqa \XMM8, \TMP1
1300 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1351 pxor \TMP4, \TMP1
1354 pxor \TMP1, \TMP2
1360 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5
1390 pxor \TMP1, \XMM5 # result is in XMM5
1396 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1412 # Multiply XMM2 * HashKey (using Karatsuba)
1414 movdqa \XMM2, \TMP1
1418 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1422 pxor \TMP1, \TMP6
1427 # Multiply XMM3 * HashKey (using Karatsuba)
1429 movdqa \XMM3, \TMP1
1433 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1437 pxor \TMP1, \TMP6
1441 # Multiply XMM4 * HashKey (using Karatsuba)
1442 movdqa \XMM4, \TMP1
1446 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1450 pxor \TMP1, \TMP6
1497 .macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1
1506 MOVADQ (%r10),\TMP1
1507 AESENC \TMP1,\XMM0
1512 MOVADQ (%r10),\TMP1
1513 AESENCLAST \TMP1,\XMM0