/Linux-v4.19/arch/x86/crypto/ |
D | glue_helper-asm-avx.S |
    18  #define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \   argument
    25  vmovdqu (6*16)(src), x6; \
    28  #define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \   argument
    35  vmovdqu x6, (6*16)(dst); \
    38  #define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \   argument
    44  vpxor (5*16)(src), x6, x6; \
    46  store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
    54  #define load_ctr_8way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2) \   argument
    75  vpshufb t1, x7, x6; \
    82  #define store_ctr_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \   argument
    [all …]
|
D | glue_helper-asm-avx2.S |
    13  #define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \   argument
    20  vmovdqu (6*32)(src), x6; \
    23  #define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \   argument
    30  vmovdqu x6, (6*32)(dst); \
    33  #define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \   argument
    42  vpxor (5*32+16)(src), x6, x6; \
    44  store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
    60  #define load_ctr_16way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t0x, t1, \   argument
    86  vpshufb t1, t2, x6; \
    93  #define store_ctr_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \   argument
    [all …]
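The load/store helpers above simply move eight (AVX) or sixteen (AVX2) consecutive 16-byte blocks between memory and SIMD registers; the store_cbc variants additionally XOR each decrypted block with the previous ciphertext block before storing, which is the CBC-decrypt chaining step. A minimal scalar C model of store_cbc_8way, a sketch with hypothetical names (the first block's XOR with the IV is left to the caller, as in the assembly):

    #include <stdint.h>
    #include <string.h>

    /* Scalar sketch of store_cbc_8way: blk[] holds 8 block-decrypted
     * 16-byte blocks, src points at the ciphertext. Blocks 1..7 are
     * XORed with ciphertext blocks 0..6 (cf. the vpxor at line 44,
     * which XORs register x6 with src block 5); block 0 ^ IV is the
     * caller's job. */
    static void store_cbc_8way_model(const uint8_t *src, uint8_t *dst,
                                     uint8_t blk[8][16])
    {
        for (int i = 7; i >= 1; i--)
            for (int j = 0; j < 16; j++)
                blk[i][j] ^= src[(i - 1) * 16 + j];
        memcpy(dst, blk, 8 * 16);
    }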
|
D | camellia-aesni-avx-asm_64.S |
    51  #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \   argument
    69  vpshufb t4, x6, x6; \
    84  filter_8bit(x6, t2, t3, t7, t6); \
    96  vaesenclast t4, x6, x6; \
    104  filter_8bit(x6, t0, t1, t7, t6); \
    136  vpxor x6, x1, x1; \
    142  vpxor x0, x6, x6; \
    148  vpxor x6, x3, x3; \
    152  vpxor x1, x6, x6; \
    169  vpxor t1, x6, x6; \
    [all …]
|
D | camellia-aesni-avx2-asm_64.S |
    68  #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \   argument
    84  vpshufb t4, x6, x6; \
    97  filter_8bit(x6, t2, t3, t7, t4); \
    99  vextracti128 $1, x6, t2##_x; \
    119  vaesenclast t4##_x, x6##_x, x6##_x; \
    121  vinserti128 $1, t2##_x, x6, x6; \
    145  filter_8bit(x6, t0, t1, t7, t6); \
    167  vpxor x6, x1, x1; \
    179  vpxor x0, x6, x6; \
    189  vpxor x6, x3, x3; \
    [all …]
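The roundsm16/roundsm32 hits show Camellia's byte substitution being run through the AES-NI instruction vaesenclast, with filter_8bit() applying affine transforms (vpshufb lookups on each nibble) to translate bytes between the Camellia s-box basis and the AES field. A scalar model of just the filter_8bit() step, assuming lo/hi are the two 16-entry nibble tables the macro takes:

    #include <stdint.h>

    /* filter_8bit(x, lo_t, hi_t, ...) in scalar form: look up the low
     * and high nibble in separate 16-byte tables and XOR the results,
     * i.e. an affine transform of one byte done with two
     * vpshufb-style lookups. */
    static uint8_t filter_8bit_model(uint8_t x,
                                     const uint8_t lo[16],
                                     const uint8_t hi[16])
    {
        return lo[x & 0x0f] ^ hi[x >> 4];
    }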
|
D | chacha20-avx2-x86_64.S |
    95  # x2 += x6, x14 = rotl32(x14 ^ x2, 16)
    118  # x10 += x14, x6 = rotl32(x6 ^ x10, 12)
    141  # x2 += x6, x14 = rotl32(x14 ^ x2, 8)
    164  # x10 += x14, x6 = rotl32(x6 ^ x10, 7)
    182  # x1 += x6, x12 = rotl32(x12 ^ x1, 16)
    204  # x11 += x12, x6 = rotl32(x6 ^ x11, 12)
    228  # x1 += x6, x12 = rotl32(x12 ^ x1, 8)
    250  # x11 += x12, x6 = rotl32(x6 ^ x11, 7)
|
D | chacha20-ssse3-x86_64.S |
    220  # x2 += x6, x14 = rotl32(x14 ^ x2, 16)
    247  # x10 += x14, x6 = rotl32(x6 ^ x10, 12)
    274  # x2 += x6, x14 = rotl32(x14 ^ x2, 8)
    301  # x10 += x14, x6 = rotl32(x6 ^ x10, 7)
    322  # x1 += x6, x12 = rotl32(x12 ^ x1, 16)
    348  # x11 += x12, x6 = rotl32(x6 ^ x11, 12)
    376  # x1 += x6, x12 = rotl32(x12 ^ x1, 8)
    402  # x11 += x12, x6 = rotl32(x6 ^ x11, 7)
    453  # x6[0-3] += s1[2]
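The comments in both ChaCha20 files spell out the quarter-round being vectorized: each SIMD register holds one state word from four (SSSE3) or eight (AVX2) blocks, and the add/xor/rotate sequence runs on all lanes at once. The scalar operation, for reference:

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t v, int n)
    {
        return (v << n) | (v >> (32 - n));
    }

    /* One ChaCha20 quarter-round on state words a, b, c, d; the
     * comment "x2 += x6, x14 = rotl32(x14 ^ x2, 16)" above is this
     * function instantiated as quarterround(x, 2, 6, 10, 14). */
    static void quarterround(uint32_t x[16], int a, int b, int c, int d)
    {
        x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 16);
        x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 12);
        x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 8);
        x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 7);
    }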
|
/Linux-v4.19/arch/arm64/crypto/ |
D | aes-neonbs-core.S |
    116  .macro mul_gf16_2, x0, x1, x2, x3, x4, x5, x6, x7, \
    128  eor \t0, \x4, \x6
    130  mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
    135  eor \x6, \x6, \t0
    140  .macro inv_gf256, x0, x1, x2, x3, x4, x5, x6, x7, \
    142  eor \t3, \x4, \x6
    145  eor \s1, \x7, \x6
    169  and \s1, \x6, \x2
    188  mul_gf16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \
    230  .macro add_round_key, x0, x1, x2, x3, x4, x5, x6, x7
    [all …]
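inv_gf256 computes the GF(2^8) inversion at the heart of the AES S-box in bitsliced form: mul_gf16_2 and friends decompose the field into a GF(((2^2)^2)^2) tower so eight blocks' worth of bits can be processed with plain eor/and instructions (the same structure appears in the 32-bit ARM version further down). For reference only, the same inverse computed the slow scalar way, by exponentiation to x^254 in the AES field; this is a different method than the tower decomposition, shown just to pin down what the macros compute:

    #include <stdint.h>

    /* GF(2^8) multiply modulo the AES polynomial x^8+x^4+x^3+x+1. */
    static uint8_t gf256_mul(uint8_t a, uint8_t b)
    {
        uint8_t r = 0;
        while (b) {
            if (b & 1)
                r ^= a;
            a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0));
            b >>= 1;
        }
        return r;
    }

    /* Inverse as x^254 (x^-1 = x^(2^8 - 2); maps 0 to 0, as AES needs). */
    static uint8_t gf256_inv(uint8_t x)
    {
        uint8_t r = 1;
        for (int bit = 7; bit >= 0; bit--) {
            r = gf256_mul(r, r);          /* square ... */
            if ((254 >> bit) & 1)
                r = gf256_mul(r, x);      /* ... and multiply */
        }
        return r;
    }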
|
D | aes-modes.S |
    122  enc_prepare w22, x21, x6
    129  encrypt_block v0, w22, x21, x6, w7
    131  encrypt_block v1, w22, x21, x6, w7
    133  encrypt_block v2, w22, x21, x6, w7
    135  encrypt_block v3, w22, x21, x6, w7
    147  encrypt_block v4, w22, x21, x6, w7
    170  dec_prepare w22, x21, x6
    196  decrypt_block v0, w22, x21, x6, w7
    225  enc_prepare w22, x21, x6
    228  umov x6, v4.d[1] /* keep swabbed ctr in reg */
    [all …]
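This file builds the block-chaining modes on top of a single-block encrypt_block/decrypt_block primitive; the CTR path keeps the byte-swapped low half of the counter in x6 so it can be incremented with ordinary register arithmetic between blocks. A minimal C sketch of a CTR loop under those assumptions, with the cipher abstracted behind a hypothetical function-pointer type:

    #include <stddef.h>
    #include <stdint.h>

    typedef void (*block_fn)(const void *key, uint8_t out[16],
                             const uint8_t in[16]);

    /* CTR mode sketch: encrypt the counter block, XOR it into the data,
     * then increment the low 64 bits of the big-endian counter (only
     * the low half is incremented here, for brevity; the byte-wise
     * carry loop is what keeping a swabbed counter in x6 avoids). */
    static void ctr_crypt(block_fn encrypt, const void *key,
                          uint8_t ctr[16], uint8_t *dst,
                          const uint8_t *src, size_t blocks)
    {
        while (blocks--) {
            uint8_t ks[16];
            encrypt(key, ks, ctr);
            for (int i = 0; i < 16; i++)
                *dst++ = *src++ ^ ks[i];
            for (int i = 15; i >= 8; i--)
                if (++ctr[i] != 0)
                    break;
        }
    }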
|
D | aes-ce-ccm-core.S |
    38  add x6, x4, #16
    45  ld1 {v5.4s}, [x6], #16 /* load 2nd round key */
    48  4: ld1 {v3.4s}, [x6], #16 /* load next round key */
    51  5: ld1 {v4.4s}, [x6], #16 /* load next round key */
    55  ld1 {v5.4s}, [x6], #16 /* load next round key */
    129  ldr x8, [x6, #8] /* load lower ctr */
    133  ld1 {v1.8b}, [x6] /* load upper ctr */
    183  str x8, [x6, #8] /* store lsb end of ctr (BE) */
|
D | sha512-core.S_shipped |
    101  eor x6,x24,x24,ror#23
    107  eor x16,x16,x6,ror#18 // Sigma1(e)
    108  ror x6,x20,#28
    115  eor x17,x6,x17,ror#34 // Sigma0(a)
    122  ldp x5,x6,[x1],#2*8
    169  rev x6,x6 // 3
    178  add x24,x24,x6 // h+=X[i]
    369  str x6,[sp,#24]
    372  eor x6,x21,x21,ror#23
    378  eor x16,x16,x6,ror#18 // Sigma1(e)
    [all …]
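The // Sigma1(e) and // Sigma0(a) annotations mark the SHA-512 big-sigma functions. The code gets three rotations out of two instructions each by nesting them: e ^ ror(e,23), rotated by a further 18, yields ror(e,18) ^ ror(e,41), and one more eor with ror(e,14) completes Sigma1 (likewise 28 and 34+5 for Sigma0). In plain C:

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* SHA-512 big-sigma functions (FIPS 180-4); the assembly folds the
     * three rotates into two eor-with-rotate instructions, e.g.
     * Sigma1(e) = ror64(e ^ ror64(e, 23), 18) ^ ror64(e, 14). */
    static inline uint64_t Sigma1(uint64_t e)
    {
        return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
    }

    static inline uint64_t Sigma0(uint64_t a)
    {
        return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
    }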
|
D | speck-neon-core.S |
    286  mov x6, ROUND_KEYS
    290  ld1r {ROUND_KEY.\lanes}, [x6]
    291  sub x6, x6, #( \n / 8 )
    294  ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
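The decryption path steps x6 backwards through ROUND_KEYS (the sub at line 291) because Speck unwinds its rounds with the keys in reverse order. The scalar round function being vectorized, for the 64-bit-word variant (rotation amounts 8 and 3):

    #include <stdint.h>

    /* One Speck128 round: x = (ror(x, 8) + y) ^ k; y = rol(y, 3) ^ x.
     * Decryption inverts these steps, consuming round keys last-first,
     * hence the backwards walk through ROUND_KEYS above. */
    static void speck128_round(uint64_t *x, uint64_t *y, uint64_t k)
    {
        *x = (*x >> 8) | (*x << 56);   /* ror(x, 8) */
        *x = (*x + *y) ^ k;
        *y = (*y << 3) | (*y >> 61);   /* rol(y, 3) */
        *y ^= *x;
    }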
|
/Linux-v4.19/arch/arm/crypto/ |
D | aes-neonbs-core.S |
    176  .macro mul_gf16_2, x0, x1, x2, x3, x4, x5, x6, x7, \
    188  veor \t0, \x4, \x6
    190  mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
    195  veor \x6, \x6, \t0
    200  .macro inv_gf256, x0, x1, x2, x3, x4, x5, x6, x7, \
    202  veor \t3, \x4, \x6
    205  veor \s1, \x7, \x6
    229  vand \s1, \x6, \x2
    248  mul_gf16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \
    268  .macro shift_rows, x0, x1, x2, x3, x4, x5, x6, x7, \
    [all …]
|
/Linux-v4.19/arch/arm64/lib/ |
D | copy_page.S |
    41  ldp x6, x7, [x1, #32]
    61  stnp x6, x7, [x0, #32]
    62  ldp x6, x7, [x1, #32]
    81  stnp x6, x7, [x0, #32]
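copy_page moves a page in 64-byte chunks, pairing ldp loads with stnp non-temporal stores (the freshly written destination is not expected to be read back soon, so the stores hint against cache allocation). Roughly, in C, assuming a 4 KiB page:

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096  /* assumption; arm64 also supports 16K/64K */

    /* C model of the copy loop: each iteration mirrors four ldp/stnp
     * pairs, i.e. one 64-byte cache line per pass. */
    static void copy_page_model(void *to, const void *from)
    {
        uint64_t *d = to;
        const uint64_t *s = from;
        for (size_t i = 0; i < PAGE_SIZE / sizeof(uint64_t); i += 8) {
            d[i + 0] = s[i + 0]; d[i + 1] = s[i + 1];
            d[i + 2] = s[i + 2]; d[i + 3] = s[i + 3];
            d[i + 4] = s[i + 4]; d[i + 5] = s[i + 5];
            d[i + 6] = s[i + 6]; d[i + 7] = s[i + 7];
        }
    }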
|
/Linux-v4.19/arch/arm64/kernel/ |
D | head.S |
    336  adr_l x6, idmap_t0sz
    337  str x5, [x6]
    339  dc ivac, x6 // Invalidate potentially stale cache line
    357  create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
    369  adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
    371  map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
    380  adrp x6, _end // runtime __pa(_end)
    382  sub x6, x6, x3 // _end - _text
    383  add x6, x6, x5 // runtime __va(_end)
    385  map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
|
D | hibernate-asm.S |
    83  break_before_make_ttbr_switch x5, x0, x6
    97  copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
    114  break_before_make_ttbr_switch x25, x21, x6
|
D | entry-ftrace.S |
    178  stp x6, x7, [sp, #48]
    186  ldp x6, x7, [sp, #48]
|
/Linux-v4.19/arch/arm64/kvm/ |
D | hyp-init.S |
    96  tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6
    153  ldr x6, =SCTLR_ELx_FLAGS
    154  bic x5, x5, x6 // Clear SCTL_M and etc
|
/Linux-v4.19/arch/arm64/boot/dts/xilinx/ |
D | zynqmp-zcu102-rev1.0.dts |
    26  reg = <0x20 0x6>;
    30  reg = <0xd0 0x6>;
|
/Linux-v4.19/arch/arm64/mm/ |
D | proc.S |
    74  mrs x6, tcr_el1
    87  stp x5, x6, [x0, #32]
    103  ldp x6, x8, [x0, #32]
    110  msr cpacr_el1, x6
    239  end_pgdp .req x6
    449  tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
|
/Linux-v4.19/arch/arm64/xen/ |
D | hypercall.S |
    104  uaccess_ttbr0_enable x6, x7, x8
    110  uaccess_ttbr0_disable x6, x7
|
/Linux-v4.19/Documentation/devicetree/bindings/gpio/ |
D | brcm,brcmstb-gpio.txt |
    66  interrupts = <0x6>;
    78  interrupts = <0x6>;
    79  interrupts-extended = <&irq0_aon_intc 0x6>,
|
/Linux-v4.19/Documentation/input/devices/ |
D | alps.rst |
    110  byte 1: 0 x6 x5 x4 x3 x2 x1 x0
    122  byte 1: 0 x6 x5 x4 x3 x2 x1 x0
    140  byte 1: 0 x6 x5 x4 x3 x2 x1 x0
    164  byte 1: 0 x10 x9 x8 x7 x6 x5 x4
    180  byte 1: 0 x8 x7 x6 x5 x4 x3 x2
    193  byte 1: 0 x6 x5 x4 x3 x2 x1 x0
    208  byte 1: 0 x10 x9 x8 x7 x6 x5 x4
    220  byte 0: 0 1 x7 x6 x5 x4 x3 x2
    256  byte 1: 0 x6 x5 x4 x3 x2 x1 x0
    268  byte 4: 0 x9 x8 x7 x6 x5 x4 x3
    [all …]
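These diagrams give the bit layouts of ALPS touchpad packets: in the common format the X coordinate's seven low bits (x6..x0) sit in bits 6..0 of byte 1, while the higher-resolution protocols spread 10- or 11-bit coordinates across bytes (e.g. x10..x4 in byte 1, the rest elsewhere). A sketch of extracting just the byte-1 fields shown above; the rest of the packet layout varies per protocol version and is not assumed here:

    #include <stdint.h>

    /* "byte 1: 0 x6 x5 x4 x3 x2 x1 x0" -> the low seven X bits. */
    static unsigned alps_x_low7(const uint8_t *pkt)
    {
        return pkt[1] & 0x7f;
    }

    /* "byte 1: 0 x10 x9 x8 x7 x6 x5 x4" -> bits 10..4 of X; the
     * remaining low bits live in another byte, which differs between
     * protocol versions, so only this shift is shown. */
    static unsigned alps_x_high_bits(const uint8_t *pkt)
    {
        return (unsigned)(pkt[1] & 0x7f) << 4;
    }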
|
/Linux-v4.19/arch/sparc/lib/ |
D | NG2memcpy.S |
    86  #define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \   argument
    92  faligndata %x5, %x6, %f10; \
    93  faligndata %x6, %x7, %f12; \
    123  #define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \   argument
    130  fsrc2 %x6, %f12;
    131  #define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \   argument
    138  fsrc2 %x6, %f12; \
    167  #define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \   argument
    174  EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1);
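FREG_FROB chains faligndata instructions across a register window: faligndata concatenates two adjacent aligned 8-byte words and extracts eight bytes at the offset latched earlier by alignaddr, which is how the copy handles a misaligned source without byte loads. A scalar model of one such extraction (big-endian view, offset 0..7):

    #include <stdint.h>

    /* Model of faligndata %x5, %x6, %f10: take the 16-byte big-endian
     * concatenation hi:lo and return the 8 bytes starting at 'off'. */
    static uint64_t faligndata_model(uint64_t hi, uint64_t lo, unsigned off)
    {
        if (off == 0)
            return hi;              /* avoid an undefined 64-bit shift */
        return (hi << (8 * off)) | (lo >> (64 - 8 * off));
    }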
|
/Linux-v4.19/arch/arm64/kernel/probes/ |
D | kprobes_trampoline.S |
    16  stp x6, x7, [sp, #S_X6]
    50  ldp x6, x7, [sp, #S_X6]
|
/Linux-v4.19/arch/powerpc/boot/dts/ |
D | sbc8548.dts |
    31  0x6 0x0 0xec000000 0x04000000>; /*64MB Flash*/
    91  reg = <0x6 0x0 0x04000000>;
|