/Linux-v5.10/arch/x86/crypto/ |
D | glue_helper-asm-avx.S |
      8  #define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
     15  vmovdqu (6*16)(src), x6; \
     18  #define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
     25  vmovdqu x6, (6*16)(dst); \
     28  #define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
     34  vpxor (5*16)(src), x6, x6; \
     36  store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
     44  #define load_ctr_8way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2) \
     65  vpshufb t1, x7, x6; \
     72  #define store_ctr_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
    [all …]
|
D | glue_helper-asm-avx2.S |
      8  #define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
     15  vmovdqu (6*32)(src), x6; \
     18  #define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
     25  vmovdqu x6, (6*32)(dst); \
     28  #define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \
     37  vpxor (5*32+16)(src), x6, x6; \
     39  store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
     55  #define load_ctr_16way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t0x, t1, \
     81  vpshufb t1, t2, x6; \
     88  #define store_ctr_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
    [all …]
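The load/store macros in these two files shuttle eight 16-byte blocks (AVX) or sixteen blocks, two per ymm register (AVX2), at fixed offsets, and the store_cbc variants fold in CBC chaining: each output block is XORed with the ciphertext block that precedes it in the input. A minimal scalar sketch of that chaining step, with illustrative names rather than the kernel's interface:

#include <stdint.h>
#include <string.h>

#define BLOCK 16	/* one cipher block */

/* Scalar view of store_cbc_8way/16way: output block i is XORed with
 * input block i - 1 (block 0 gets the IV elsewhere), then everything
 * is stored.  The vpxor lines above, e.g. (5*16)(src) pairing with
 * x6, are exactly this off-by-one. */
static void cbc_store_blocks(const uint8_t *src, uint8_t *dst,
			     uint8_t blocks[][BLOCK], int nblocks)
{
	for (int i = 1; i < nblocks; i++)
		for (int j = 0; j < BLOCK; j++)
			blocks[i][j] ^= src[(i - 1) * BLOCK + j];
	memcpy(dst, blocks, (size_t)nblocks * BLOCK);
}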
|
D | camellia-aesni-avx-asm_64.S |
     51  #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
     69  vpshufb t4, x6, x6; \
     84  filter_8bit(x6, t2, t3, t7, t6); \
     96  vaesenclast t4, x6, x6; \
    104  filter_8bit(x6, t0, t1, t7, t6); \
    136  vpxor x6, x1, x1; \
    142  vpxor x0, x6, x6; \
    148  vpxor x6, x3, x3; \
    152  vpxor x1, x6, x6; \
    169  vpxor t1, x6, x6; \
    [all …]
|
D | camellia-aesni-avx2-asm_64.S |
     63  #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
     79  vpshufb t4, x6, x6; \
     92  filter_8bit(x6, t2, t3, t7, t4); \
     94  vextracti128 $1, x6, t2##_x; \
    114  vaesenclast t4##_x, x6##_x, x6##_x; \
    116  vinserti128 $1, t2##_x, x6, x6; \
    140  filter_8bit(x6, t0, t1, t7, t6); \
    162  vpxor x6, x1, x1; \
    174  vpxor x0, x6, x6; \
    184  vpxor x6, x3, x3; \
    [all …]
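These Camellia rounds reuse the AES-NI S-box (vaesenclast) by sandwiching it between affine transforms, applied with filter_8bit: two 16-entry vpshufb table lookups, one per nibble, combined with XOR. A hedged SSE-intrinsics sketch of that data flow (table contents omitted, names illustrative; the real macro also threads a mask and a temporary register through):

#include <tmmintrin.h>	/* SSSE3: _mm_shuffle_epi8 */

/* Apply an 8-bit -> 8-bit affine map as two 4-bit table lookups:
 * one table indexed by the low nibble, one by the high nibble,
 * results XORed together. */
static __m128i filter_8bit(__m128i x, __m128i tbl_lo, __m128i tbl_hi)
{
	const __m128i mask4 = _mm_set1_epi8(0x0f);
	__m128i lo = _mm_and_si128(x, mask4);			/* low nibbles  */
	__m128i hi = _mm_and_si128(_mm_srli_epi16(x, 4), mask4);/* high nibbles */

	lo = _mm_shuffle_epi8(tbl_lo, lo);
	hi = _mm_shuffle_epi8(tbl_hi, hi);
	return _mm_xor_si128(lo, hi);
}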
|
D | chacha-ssse3-x86_64.S |
    296  # x2 += x6, x14 = rotl32(x14 ^ x2, 16)
    323  # x10 += x14, x6 = rotl32(x6 ^ x10, 12)
    350  # x2 += x6, x14 = rotl32(x14 ^ x2, 8)
    377  # x10 += x14, x6 = rotl32(x6 ^ x10, 7)
    398  # x1 += x6, x12 = rotl32(x12 ^ x1, 16)
    424  # x11 += x12, x6 = rotl32(x6 ^ x11, 12)
    452  # x1 += x6, x12 = rotl32(x12 ^ x1, 8)
    478  # x11 += x12, x6 = rotl32(x6 ^ x11, 7)
    529  # x6[0-3] += s1[2]
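The comments above spell out ChaCha's quarter-round with its 16/12/8/7 rotation pattern. For reference, the same operation in plain C; indices a, b, c, d select state words, e.g. 2, 6, 10, 14 for the column pass shown:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One ChaCha quarter-round over four words of the 16-word state. */
static void chacha_quarter_round(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 16);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 12);
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 8);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 7);
}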
|
/Linux-v5.10/arch/arm64/boot/dts/microchip/ |
D | sparx5.dtsi |
     87  reg = <0x6 0x1110000c 0x24>;
    114  reg = <0x6 0x00300000 0x10000>, /* GIC Dist */
    115        <0x6 0x00340000 0xc0000>,  /* GICR */
    116        <0x6 0x00200000 0x2000>,   /* GICC */
    117        <0x6 0x00210000 0x2000>,   /* GICV */
    118        <0x6 0x00220000 0x2000>;   /* GICH */
    125  reg = <0x6 0x00000000 0xd0>;
    142  reg = <0x6 0x00100000 0x20>;
    155  reg = <0x6 0x00102000 0x20>;
    168  reg = <0x6 0x00104000 0x40>;
    [all …]
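These reg properties use two address cells plus one size cell, so the leading 0x6 is the high 32 bits of a 64-bit bus address: <0x6 0x00300000 0x10000> describes a 64 KiB region at 0x6_0030_0000. A one-line helper showing the combination, assuming #address-cells = <2> on this bus:

#include <stdint.h>

/* Combine a two-cell devicetree address: high cell in bits 63..32,
 * low cell in bits 31..0. */
static uint64_t dt_addr(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

/* dt_addr(0x6, 0x00300000) == 0x600300000ULL, the GIC Dist base above */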
|
/Linux-v5.10/arch/arm64/crypto/ |
D | aes-neonbs-core.S |
    113  .macro mul_gf16_2, x0, x1, x2, x3, x4, x5, x6, x7, \
    125  eor \t0, \x4, \x6
    127  mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
    132  eor \x6, \x6, \t0
    137  .macro inv_gf256, x0, x1, x2, x3, x4, x5, x6, x7, \
    139  eor \t3, \x4, \x6
    142  eor \s1, \x7, \x6
    166  and \s1, \x6, \x2
    185  mul_gf16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \
    227  .macro add_round_key, x0, x1, x2, x3, x4, x5, x6, x7
    [all …]
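mul_gf16_2 and inv_gf256 (here and in the 32-bit ARM variant further down) compute the AES S-box field inversion bitsliced over a GF(((2^2)^2)^2) tower, with each bit position of eight bytes spread across a vector register. As a plain, non-bitsliced reference for what they compute per byte, the textbook a^254 route; this is for comparison only, not the kernel's method:

#include <stdint.h>

/* Multiply in GF(2^8) with the AES polynomial x^8+x^4+x^3+x+1. */
static uint8_t gf256_mul(uint8_t a, uint8_t b)
{
	uint8_t r = 0;
	while (b) {
		if (b & 1)
			r ^= a;
		a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0));
		b >>= 1;
	}
	return r;
}

static uint8_t gf256_pow(uint8_t a, int e)
{
	uint8_t r = 1;
	while (e) {
		if (e & 1)
			r = gf256_mul(r, a);
		a = gf256_mul(a, a);
		e >>= 1;
	}
	return r;
}

/* Inverse = a^(2^8 - 2) = a^254; inv(0) is defined as 0 in AES. */
static uint8_t gf256_inv(uint8_t a)
{
	return a ? gf256_pow(a, 254) : 0;
}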
|
D | poly1305-core.S_shipped |
     70  ldp x6,x17,[x0,#16] // [along with is_base2_26]
     78  lsr x16,x6,#32
    100  csel x6,x6,x14,eq
    113  adc x6,x6,x3
    130  mul x10,x6,x9 // h2*5*r1
    132  mul x11,x6,x7 // h2*r0
    138  and x6,x14,#3
    142  adc x6,x6,xzr
    147  stp x6,xzr,[x0,#16] // [and clear is_base2_26]
    158  ldp x6,x7,[x0,#16] // [along with is_base2_26]
    [all …]
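Terms like h2*5*r1 in the comments come from reducing modulo 2^130 - 5: since 2^130 is congruent to 5, anything at bit 130 and above folds back in multiplied by 5, and "and x6,x14,#3" keeps only h2's low two bits. A sketch of that partial reduction on three 64-bit limbs (the assembly fuses this with the multiply):

#include <stdint.h>

/* Partially reduce h = h0 + 2^64*h1 + 2^128*h2 mod 2^130 - 5.
 * Assumes h2 is small enough that c * 5 does not overflow,
 * as in the limb schedule above. */
static void poly1305_partial_reduce(uint64_t h[3])
{
	uint64_t c = h[2] >> 2;		/* everything at 2^130 and up */

	h[2] &= 3;			/* keep h below 2^130         */
	c *= 5;				/* fold back: 2^130 == 5      */
	h[0] += c;
	if (h[0] < c) {			/* ripple the carry upward    */
		if (++h[1] == 0)
			h[2]++;
	}
}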
|
D | aes-modes.S |
    133  enc_prepare w8, x6, x7
    134  encrypt_block v4, w8, x6, x7, w9
    135  enc_switch_key w3, x2, x6
    140  enc_prepare w3, x2, x6
    147  encrypt_block v0, w3, x2, x6, w7
    149  encrypt_block v1, w3, x2, x6, w7
    151  encrypt_block v2, w3, x2, x6, w7
    153  encrypt_block v3, w3, x2, x6, w7
    163  encrypt_block v4, w3, x2, x6, w7
    180  enc_prepare w8, x6, x7
    [all …]
|
D | aes-ce-ccm-core.S |
     35  add x6, x4, #16
     42  ld1 {v5.4s}, [x6], #16 /* load 2nd round key */
     45  4: ld1 {v3.4s}, [x6], #16 /* load next round key */
     48  5: ld1 {v4.4s}, [x6], #16 /* load next round key */
     52  ld1 {v5.4s}, [x6], #16 /* load next round key */
    127  ldr x8, [x6, #8] /* load lower ctr */
    131  ld1 {v1.8b}, [x6] /* load upper ctr */
    181  str x8, [x6, #8] /* store lsb end of ctr (BE) */
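The ctr comments suggest the low 64 bits of the CCM counter block live big-endian in memory and are incremented as a plain integer held in x8 across blocks. A rough C approximation of one increment (hypothetical helper, assuming a little-endian host for the byte swaps):

#include <stdint.h>
#include <string.h>

/* Increment the big-endian low half of a 16-byte CCM counter block:
 * load, swap to host order, add one, swap back, store. */
static void ccm_ctr_inc(uint8_t ctr[16])
{
	uint64_t lo;

	memcpy(&lo, ctr + 8, sizeof(lo));	/* load lower ctr        */
	lo = __builtin_bswap64(__builtin_bswap64(lo) + 1);
	memcpy(ctr + 8, &lo, sizeof(lo));	/* store back, still BE  */
}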
|
D | sha512-core.S_shipped |
    101  eor x6,x24,x24,ror#23
    107  eor x16,x16,x6,ror#18 // Sigma1(e)
    108  ror x6,x20,#28
    115  eor x17,x6,x17,ror#34 // Sigma0(a)
    122  ldp x5,x6,[x1],#2*8
    169  rev x6,x6 // 3
    178  add x24,x24,x6 // h+=X[i]
    369  str x6,[sp,#24]
    372  eor x6,x21,x21,ror#23
    378  eor x16,x16,x6,ror#18 // Sigma1(e)
    [all …]
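The eor/ror chains annotated Sigma1(e) and Sigma0(a) are the SHA-512 big-sigma functions from FIPS 180-4, each built from three 64-bit rotations (the two-step eor form merely factors them). Their direct C form:

#include <stdint.h>

static inline uint64_t ror64(uint64_t v, int n)
{
	return (v >> n) | (v << (64 - n));
}

/* SHA-512 big-sigma functions (FIPS 180-4). */
static inline uint64_t Sigma1(uint64_t e)
{
	return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
}

static inline uint64_t Sigma0(uint64_t a)
{
	return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
}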
|
/Linux-v5.10/arch/arm64/lib/ |
D | crc32.S |
     26  ldp x5, x6, [x8]
     30  CPU_BE( rev x6, x6 )
     51  crc32\c\()x w8, w8, x6
     57  ldp x5, x6, [x1, #-16]
     61  CPU_BE( rev x6, x6 )
     65  crc32\c\()x w0, w0, x6
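The loop consumes 16 bytes per step: an ldp pair feeds two crc32x instructions, and the CPU_BE(rev) lines byte-swap on big-endian kernels because crc32x processes the register least-significant byte first. A C counterpart using the ACLE intrinsic (needs a compiler with the CRC32 extension enabled, e.g. -march=armv8-a+crc, and assumes a little-endian host):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arm_acle.h>	/* __crc32d */

/* Fold 16 bytes per iteration through the hardware CRC32 unit. */
uint32_t crc32_by16(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len >= 16) {
		uint64_t a, b;

		memcpy(&a, p, 8);
		memcpy(&b, p + 8, 8);
		crc = __crc32d(crc, a);
		crc = __crc32d(crc, b);
		p += 16;
		len -= 16;
	}
	return crc;	/* tail bytes handled separately, as in the asm */
}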
|
D | copy_page.S |
     30  ldp x6, x7, [x1, #32]
     50  stnp x6, x7, [x0, #32 - 256]
     51  ldp x6, x7, [x1, #32]
     70  stnp x6, x7, [x0, #32 - 256]
|
/Linux-v5.10/arch/arm64/kernel/ |
D | head.S |
    311  mrs_s x6, SYS_ID_AA64MMFR2_EL1
    312  and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
    314  cbnz x6, 1f
    318  adr_l x6, vabits_actual
    319  str x5, [x6]
    321  dc ivac, x6 // Invalidate potentially stale cache line
    340  adr_l x6, idmap_t0sz
    341  str x5, [x6]
    343  dc ivac, x6 // Invalidate potentially stale cache line
    361  create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
    [all …]
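The mrs/and/cbnz sequence probes the LVA (large virtual address) field of ID_AA64MMFR2_EL1 to decide whether 52-bit VAs are available before vabits_actual is written. Roughly, in C (the shift value mirrors ID_AA64MMFR2_LVA_SHIFT, 16 in the v5.10 headers; treat it as an assumption here):

#include <stdint.h>

#define LVA_SHIFT 16	/* assumed value of ID_AA64MMFR2_LVA_SHIFT */

/* Nonzero 4-bit LVA field => 52-bit virtual addresses supported;
 * zero => fall back to the default VA size. */
static int supports_52bit_va(uint64_t mmfr2)
{
	return (int)((mmfr2 >> LVA_SHIFT) & 0xf);
}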
|
D | hibernate-asm.S |
     73  break_before_make_ttbr_switch x5, x0, x6, x8
     87  copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
    104  break_before_make_ttbr_switch x25, x21, x6, x8
|
D | entry-ftrace.S |
     44  stp x6, x7, [sp, #S_X6]
    114  ldp x6, x7, [sp, #S_X6]
    330  stp x6, x7, [sp, #48]
    340  ldp x6, x7, [sp, #48]
|
/Linux-v5.10/arch/arm/crypto/ |
D | aes-neonbs-core.S |
    168  .macro mul_gf16_2, x0, x1, x2, x3, x4, x5, x6, x7, \
    180  veor \t0, \x4, \x6
    182  mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
    187  veor \x6, \x6, \t0
    192  .macro inv_gf256, x0, x1, x2, x3, x4, x5, x6, x7, \
    194  veor \t3, \x4, \x6
    197  veor \s1, \x7, \x6
    221  vand \s1, \x6, \x2
    240  mul_gf16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \
    260  .macro shift_rows, x0, x1, x2, x3, x4, x5, x6, x7, \
    [all …]
|
/Linux-v5.10/arch/arm64/kvm/hyp/nvhe/ |
D | host.S |
     29  stp x6, x7, [x0, #CPU_XREG_OFFSET(6)]
     47  ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)]
     71  mov x6, x3
    118  ldr_l x6, kimage_voffset
    121  sub x5, x5, x6
|
/Linux-v5.10/tools/testing/selftests/arm64/fp/ |
D | sve-test.S |
    249  mov x6, x1
    251  _adrz x0, x6, 2
    256  mov x0, x6
    270  mov x6, x1
    272  _adrp x0, x6, 2
    277  mov x0, x6
    371  mov x1, x6
    380  mov x2, x6
    396  mov x1, x6
    405  mov x2, x6
|
D | fpsimd-test.S |
    216  mov x6, x1
    218  _adrv x0, x6, 2
    223  mov x0, x6
    287  mov x1, x6
    296  mov x2, x6
|
/Linux-v5.10/arch/arm64/boot/dts/xilinx/ |
D | zynqmp-zcu102-rev1.0.dts |
     26  reg = <0x20 0x6>;
     30  reg = <0xd0 0x6>;
|
/Linux-v5.10/arch/arm64/xen/ |
D | hypercall.S |
    104  uaccess_ttbr0_enable x6, x7, x8
    110  uaccess_ttbr0_disable x6, x7
|
/Linux-v5.10/arch/arm64/mm/ |
D | proc.S |
     74  mrs x6, cpacr_el1
     88  stp x6, x7, [x0, #32]
    109  ldp x6, x8, [x0, #32]
    123  msr cpacr_el1, x6
    234  end_pgdp .req x6
    475  tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
|
/Linux-v5.10/Documentation/devicetree/bindings/gpio/ |
D | brcm,brcmstb-gpio.txt |
     66  interrupts = <0x6>;
     78  interrupts = <0x6>;
     79  interrupts-extended = <&irq0_aon_intc 0x6>,
|
/Linux-v5.10/Documentation/input/devices/ |
D | alps.rst |
    110  byte 1: 0 x6 x5 x4 x3 x2 x1 x0
    122  byte 1: 0 x6 x5 x4 x3 x2 x1 x0
    140  byte 1: 0 x6 x5 x4 x3 x2 x1 x0
    164  byte 1: 0 x10 x9 x8 x7 x6 x5 x4
    180  byte 1: 0 x8 x7 x6 x5 x4 x3 x2
    193  byte 1: 0 x6 x5 x4 x3 x2 x1 x0
    208  byte 1: 0 x10 x9 x8 x7 x6 x5 x4
    220  byte 0: 0 1 x7 x6 x5 x4 x3 x2
    256  byte 1: 0 x6 x5 x4 x3 x2 x1 x0
    268  byte 4: 0 x9 x8 x7 x6 x5 x4 x3
    [all …]
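These tables describe bit-packed PS/2 packets: bit 7 of each data byte is fixed, and the remaining bits carry coordinate fragments, e.g. byte 1 holding x6..x0. A hedged decode of that recurring pattern; which byte supplies the upper X bits varies by protocol version, so it is parameterized here, and these helpers are illustrative rather than the driver's decoder:

#include <stdint.h>

/* Low seven X bits from "byte 1: 0 x6 x5 x4 x3 x2 x1 x0". */
static unsigned int alps_x_low7(uint8_t byte1)
{
	return byte1 & 0x7f;
}

/* Merge in the upper bits from whichever byte a given protocol
 * version uses, masked and shifted into position above bit 6. */
static unsigned int alps_combine_x(uint8_t byte1, uint8_t hi_byte,
				   unsigned int hi_mask, int hi_shift)
{
	return alps_x_low7(byte1) |
	       (((unsigned int)hi_byte & hi_mask) << hi_shift);
}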
|