/Linux-v4.19/arch/powerpc/crypto/ |
D | sha1-powerpc-asm.S |
    34  #define W(t) (((t)%16)+16)    macro
    37  LWZ(W(t),(t)*4,r4)
    46  add r14,r0,W(t); \
    47  LWZ(W((t)+4),((t)+4)*4,r4); \
    58  xor r5,W((t)+4-3),W((t)+4-8); \
    60  xor W((t)+4),W((t)+4-16),W((t)+4-14); \
    61  add r0,r0,W(t); \
    62  xor W((t)+4),W((t)+4),r5; \
    64  rotlwi W((t)+4),W((t)+4),1
    73  add r0,r0,W(t); \
    [all …]
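The excerpt above is the SHA-1 message schedule in PowerPC assembly: W(t) maps schedule word t onto one of sixteen rotating GPRs, and the xor/rotlwi sequence at lines 58-64 computes the expansion recurrence. A minimal C sketch of that recurrence (names are illustrative, not from the kernel):

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t x, unsigned n)
    {
            return (x << n) | (x >> (32 - n));
    }

    /* SHA-1 schedule: W[t] = (W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) <<< 1 */
    static void sha1_expand(uint32_t W[80])
    {
            for (int t = 16; t < 80; t++)
                    W[t] = rotl32(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
    }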
|
/Linux-v4.19/crypto/ |
D | sha256_generic.c |
    61  static inline void LOAD_OP(int I, u32 *W, const u8 *input)    in LOAD_OP() argument
    63  W[I] = get_unaligned_be32((__u32 *)input + I);    in LOAD_OP()
    66  static inline void BLEND_OP(int I, u32 *W)    in BLEND_OP() argument
    68  W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];    in BLEND_OP()
    74  u32 W[64];    in sha256_transform() local
    79  LOAD_OP(i, W, input);    in sha256_transform()
    83  BLEND_OP(i, W);    in sha256_transform()
    90  t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0];    in sha256_transform()
    92  t1 = g + e1(d) + Ch(d,e,f) + 0x71374491 + W[ 1];    in sha256_transform()
    94  t1 = f + e1(c) + Ch(c,d,e) + 0xb5c0fbcf + W[ 2];    in sha256_transform()
    [all …]
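BLEND_OP() at line 68 is the SHA-256 message-schedule step. For context, a self-contained sketch with the FIPS 180-4 sigma helpers it presumes (s0/s1 below follow the spec; they are not copied from the file):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
            return (x >> n) | (x << (32 - n));
    }

    /* FIPS 180-4 small sigmas for SHA-256 */
    static inline uint32_t s0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); }
    static inline uint32_t s1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

    /* Expand the 16 loaded words to the full 64-entry schedule */
    static void sha256_expand(uint32_t W[64])
    {
            for (int i = 16; i < 64; i++)
                    W[i] = s1(W[i-2]) + W[i-7] + s0(W[i-15]) + W[i-16];
    }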
|
D | sha512_generic.c |
    93   static inline void LOAD_OP(int I, u64 *W, const u8 *input)    in LOAD_OP() argument
    95   W[I] = get_unaligned_be64((__u64 *)input + I);    in LOAD_OP()
    98   static inline void BLEND_OP(int I, u64 *W)    in BLEND_OP() argument
    100  W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]);    in BLEND_OP()
    109  u64 W[16];    in sha512_transform() local
    123  LOAD_OP(i + j, W, input);    in sha512_transform()
    126  BLEND_OP(i + j, W);    in sha512_transform()
    131  t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i  ] + W[(i & 15)];    in sha512_transform()
    133  t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];    in sha512_transform()
    135  t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];    in sha512_transform()
    [all …]
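Unlike sha256_generic.c, this file never materializes the full 80-word schedule: line 100 updates a 16-entry ring buffer in place, since W[t] depends only on the previous 16 words. A hedged scalar sketch of the trick (the SHA-512 sigmas follow FIPS 180-4; nothing below is copied from the file):

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
            return (x >> n) | (x << (64 - n));
    }

    /* FIPS 180-4 small sigmas for SHA-512 */
    static inline uint64_t s0(uint64_t x) { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); }
    static inline uint64_t s1(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }

    /* Rolling window: "& 15" folds schedule index t onto 16 slots,
     * cutting the on-stack schedule from 640 bytes to 128. */
    static inline uint64_t schedule_next(uint64_t W[16], int t)
    {
            W[t & 15] += s1(W[(t - 2) & 15]) + W[(t - 7) & 15] + s0(W[(t - 15) & 15]);
            return W[t & 15];
    }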
|
/Linux-v4.19/lib/ |
D | sha256.c |
    38  static inline void LOAD_OP(int I, u32 *W, const u8 *input)    in LOAD_OP() argument
    40  W[I] = __be32_to_cpu(((__be32 *)(input))[I]);    in LOAD_OP()
    43  static inline void BLEND_OP(int I, u32 *W)    in BLEND_OP() argument
    45  W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];    in BLEND_OP()
    51  u32 W[64];    in sha256_transform() local
    56  LOAD_OP(i, W, input);    in sha256_transform()
    60  BLEND_OP(i, W);    in sha256_transform()
    67  t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];    in sha256_transform()
    69  t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];    in sha256_transform()
    71  t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];    in sha256_transform()
    [all …]
|
/Linux-v4.19/arch/arm/kvm/hyp/ |
D | hyp-entry.S |
    66  W(b)    hyp_reset
    67  W(b)    hyp_undef
    68  W(b)    hyp_svc
    69  W(b)    hyp_pabt
    70  W(b)    hyp_dabt
    71  W(b)    hyp_hvc
    72  W(b)    hyp_irq
    73  W(b)    hyp_fiq
    84  W(add)  sp, sp, #1    /* Reset     7 */
    85  W(add)  sp, sp, #1    /* Undef     6 */
    [all …]
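W() here is the ARM unified-assembler macro, not a data word: in a Thumb-2 kernel it forces the wide 32-bit encoding so every vector-table entry keeps a fixed 4-byte stride. A sketch of its shape, modeled on arch/arm/include/asm/unified.h from memory (consult the header for the authoritative definition):

    /* Thumb-2 build: "W(b) hyp_reset" assembles as "b.w hyp_reset",
     * the 32-bit branch; a narrow 16-bit "b" would break the table's
     * fixed entry size. In ARM mode every instruction is already
     * 32 bits, so W() adds nothing. */
    #ifdef CONFIG_THUMB2_KERNEL
    #define W(instr)        instr.w
    #else
    #define W(instr)        instr
    #endif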
|
/Linux-v4.19/arch/x86/kernel/ |
D | uprobes.c |
    59   #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\    macro
    102  W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
    103  W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
    104  W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
    105  W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
    106  W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
    107  W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
    108  W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
    109  W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
    110  W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
    [all …]
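Here W() builds a 256-entry opcode bitmap one row of 16 opcodes at a time; the alternating "|" and "," between rows OR two 16-bit rows into each 32-bit word. A hedged sketch of the idiom (macro shape reproduced from memory; the lookup helper is invented for illustration):

    #include <stdint.h>

    /* Pack 16 one-bit flags for opcodes row+0x0 .. row+0xf; rows with
     * row % 32 == 16 land in the upper half of the same 32-bit word. */
    #define W(row, b0, b1, b2, b3, b4, b5, b6, b7,              \
                   b8, b9, ba, bb, bc, bd, be, bf)              \
            (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|  \
              (b3##UL << 0x3)|(b4##UL << 0x4)|(b5##UL << 0x5)|  \
              (b6##UL << 0x6)|(b7##UL << 0x7)|(b8##UL << 0x8)|  \
              (b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb)|  \
              (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|  \
              (bf##UL << 0xf)) << ((row) % 32))

    /* Testing one opcode is then a shift and a mask: */
    static int opcode_flag(const uint32_t table[8], uint8_t opcode)
    {
            return (table[opcode / 32] >> (opcode % 32)) & 1;
    }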
|
/Linux-v4.19/arch/x86/crypto/ |
D | sha1_ssse3_asm.S |
    315  .set W, W0    define
    323  .set W_minus_32, W
    334  .set W_minus_04, W
    335  .set W, W_minus_32    define
    356  movdqa  W_TMP1, W
    378  movdqa  W_minus_12, W
    379  palignr $8, W_minus_16, W    # w[i-14]
    382  pxor    W_minus_08, W
    385  pxor    W_TMP1, W
    386  movdqa  W, W_TMP2
    [all …]
|
D | sha512-ssse3-asm.S |
    102  # W[t]+K[t] (stack frame)
    129  add     WK_2(idx), T1    # W[t] + K[t] from message scheduler
    133  add     h_64, T1         # T1 = CH(e,f,g) + W[t] + K[t] + h
    135  add     tmp0, T1         # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
    158  # Two rounds are computed based on the values for K[t-2]+W[t-2] and
    159  # K[t-1]+W[t-1] which were previously stored at WK_2 by the message
    166  # Eg. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]}
    175  movdqa  W_t(idx), %xmm2    # XMM2 = W[t-2]
    178  movdqa  %xmm2, %xmm0       # XMM0 = W[t-2]
    183  movdqu  W_t(idx), %xmm5    # XMM5 = W[t-15]
    [all …]
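The comments show a scheduling trick shared by all three SHA-512 SSE/AVX bodies below: the message scheduler stores precomputed W[t]+K[t] sums (two per 16-byte slot, hence WK_2), so the hot round code performs a single add. A scalar sketch of the same idea (illustrative only; sha512_K is the standard constant table):

    #include <stdint.h>

    /* Fold the round constant into the schedule ahead of time; all
     * arithmetic is mod 2^64, as everywhere in SHA-512. */
    static void precompute_wk(uint64_t WK[80], const uint64_t W[80],
                              const uint64_t sha512_K[80])
    {
            for (int t = 0; t < 80; t++)
                    WK[t] = W[t] + sha512_K[t];
    }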
|
D | sha512-avx-asm.S |
    78   # W[t] + K[t] | W[t+1] + K[t+1]
    105  # W[t]+K[t] (stack frame)
    136  add     WK_2(idx), T1    # W[t] + K[t] from message scheduler
    140  add     h_64, T1         # T1 = CH(e,f,g) + W[t] + K[t] + h
    142  add     tmp0, T1         # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
    164  # Two rounds are computed based on the values for K[t-2]+W[t-2] and
    165  # K[t-1]+W[t-1] which were previously stored at WK_2 by the message
    172  # Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]}
    177  vmovdqa W_t(idx), %xmm4    # XMM4 = W[t-2]
    179  vmovdqu W_t(idx), %xmm5    # XMM5 = W[t-15]
    [all …]
|
D | sha512-avx2-asm.S |
    171  MY_VPALIGNR YTMP0, Y_3, Y_2, 8    # YTMP0 = W[-7]
    173  vpaddq  Y_0, YTMP0, YTMP0         # YTMP0 = W[-7] + W[-16]
    175  MY_VPALIGNR YTMP1, Y_1, Y_0, 8    # YTMP1 = W[-15]
    182  vpor    YTMP2, YTMP3, YTMP3       # YTMP3 = W[-15] ror 1
    184  vpsrlq  $7, YTMP1, YTMP4          # YTMP4 = W[-15] >> 7
    228  vpor    YTMP2, YTMP1, YTMP1       # YTMP1 = W[-15] ror 8
    230  vpxor   YTMP4, YTMP3, YTMP3       # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
    235  vpaddq  YTMP1, YTMP0, YTMP0       # YTMP0 = W[-16] + W[-7] + s0
    237  vperm2f128 $0x0, YTMP0, YTMP0, Y_0    # Y_0 = W[-16] + W[-7] + s0 {BABA}
    239  vpand   MASK_YMM_LO(%rip), YTMP0, YTMP0    # YTMP0 = W[-16] + W[-7] + s0 {DC00}
    [all …]
|
D | sha256-ssse3-asm.S |
    149  ## compute W[-16] + W[-7] 4 at a time
    154  palignr $4, X2, XTMP0    # XTMP0 = W[-7]
    162  paddd   X0, XTMP0        # XTMP0 = W[-7] + W[-16]
    167  palignr $4, X0, XTMP1    # XTMP1 = W[-15]
    171  movdqa  XTMP1, XTMP2     # XTMP2 = W[-15]
    175  movdqa  XTMP1, XTMP3     # XTMP3 = W[-15]
    186  por     XTMP2, XTMP1     # XTMP1 = W[-15] ror 7
    191  movdqa  XTMP3, XTMP2     # XTMP2 = W[-15]
    194  movdqa  XTMP3, XTMP4     # XTMP4 = W[-15]
    211  psrld   $3, XTMP4        # XTMP4 = W[-15] >> 3
    [all …]
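Because SSE has no 32-bit rotate, "W[-15] ror 7" above is synthesized from two shifts and an OR, four schedule words per instruction. An intrinsics sketch of the sigma0 portion the excerpt computes (a fragment under stated assumptions, not the kernel routine, which also folds in W[-16], W[-7] and the trickier W[-2] term):

    #include <immintrin.h>

    /* sigma0(x) = (x ror 7) ^ (x ror 18) ^ (x >> 3), on 4 lanes at once */
    static __m128i sigma0_x4(__m128i w_minus_15)
    {
            __m128i r7  = _mm_or_si128(_mm_srli_epi32(w_minus_15, 7),
                                       _mm_slli_epi32(w_minus_15, 25));
            __m128i r18 = _mm_or_si128(_mm_srli_epi32(w_minus_15, 18),
                                       _mm_slli_epi32(w_minus_15, 14));
            return _mm_xor_si128(_mm_xor_si128(r7, r18),
                                 _mm_srli_epi32(w_minus_15, 3));
    }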
|
D | sha256-avx-asm.S |
    156  ## compute W[-16] + W[-7] 4 at a time
    161  vpalignr $4, X2, X3, XTMP0    # XTMP0 = W[-7]
    168  vpaddd  X0, XTMP0, XTMP0      # XTMP0 = W[-7] + W[-16]
    173  vpalignr $4, X0, X1, XTMP1    # XTMP1 = W[-15]
    190  vpor    XTMP2, XTMP3, XTMP3   # XTMP1 = W[-15] MY_ROR 7
    204  vpsrld  $3, XTMP1, XTMP4      # XTMP4 = W[-15] >> 3
    216  vpxor   XTMP2, XTMP3, XTMP3   # XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR
    225  vpshufd $0b11111010, X3, XTMP2    # XTMP2 = W[-2] {BBAA}
    228  vpaddd  XTMP1, XTMP0, XTMP0   # XTMP0 = W[-16] + W[-7] + s0
    240  vpsrld  $10, XTMP2, XTMP4     # XTMP4 = W[-2] >> 10 {BBAA}
    [all …]
|
/Linux-v4.19/arch/arm/crypto/ |
D | sha1-armv7-neon.S |
    92   W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \    argument
    94   pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    98   pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    102  pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    106  W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \    argument
    108  pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    112  pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    115  pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    119  W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \    argument
    121  pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    [all …]
|
/Linux-v4.19/tools/bpf/bpftool/bash-completion/ |
D | bpftool |
    51   COMPREPLY+=( $( compgen -W "$w" -- "$cur" ) )
    73   COMPREPLY+=( $( compgen -W "$*" -- "$cur" ) )
    78   COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
    84   COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
    92   COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
    98   COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
    111  COMPREPLY+=( $( compgen -W "$maps" -- "$cur" ) )
    122  COMPREPLY+=( $( compgen -W "$(seq 0 $((nmaps - 1)))" -- "$cur" ) )
    127  COMPREPLY+=( $( compgen -W "$( ls /sys/class/net 2>/dev/null )" -- \
    188  COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
    [all …]
|
/Linux-v4.19/tools/memory-model/ |
D | linux-kernel.def |
    32   cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
    33   cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
    34   cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
    35   cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
    100  atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
    101  atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
    102  atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
    103  atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
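In these herd7 definitions, W is the new-value argument of cmpxchg (pointer X, expected value V, new value W), and the {mb|once|acquire|release} tags name the ordering each variant provides. A hedged kernel-style sketch of why the suffixed variants exist (a toy lock for illustration, not compile-tested, not from the tree):

    #include <linux/atomic.h>

    struct toy_lock { atomic_t v; };    /* 0 = unlocked, 1 = locked */

    static void toy_lock_acquire(struct toy_lock *l)
    {
            /* third argument is the "W" column: the value to install;
             * ACQUIRE ordering is all the lock side needs */
            while (atomic_cmpxchg_acquire(&l->v, 0, 1) != 0)
                    cpu_relax();
    }

    static void toy_lock_release(struct toy_lock *l)
    {
            /* RELEASE ordering pairs with the acquire above */
            atomic_set_release(&l->v, 0);
    }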
|
/Linux-v4.19/arch/arm/kernel/ |
D | hyp-stub.S |
    129  W(adr)  r7, __hyp_stub_vectors
    276  __hyp_stub_reset:   W(b)    .
    277  __hyp_stub_und:     W(b)    .
    278  __hyp_stub_svc:     W(b)    .
    279  __hyp_stub_pabort:  W(b)    .
    280  __hyp_stub_dabort:  W(b)    .
    281  __hyp_stub_trap:    W(b)    __hyp_stub_do_trap
    282  __hyp_stub_irq:     W(b)    .
    283  __hyp_stub_fiq:     W(b)    .
|
/Linux-v4.19/arch/arm/lib/ |
D | memmove.S |
    88   6:  W(nop)
    89       W(ldr)  r3, [r1, #-4]!
    90       W(ldr)  r4, [r1, #-4]!
    91       W(ldr)  r5, [r1, #-4]!
    92       W(ldr)  r6, [r1, #-4]!
    93       W(ldr)  r7, [r1, #-4]!
    94       W(ldr)  r8, [r1, #-4]!
    95       W(ldr)  lr, [r1, #-4]!
    99       W(nop)
    100      W(str)  r3, [r0, #-4]!
    [all …]
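This is the backward-copy path of memmove: eight pre-decrement loads then eight stores per iteration, with W() pinning each instruction to 4 bytes so computed jumps into the unrolled run land correctly. A plain-C analogue of the direction choice (illustrative, one word at a time; assumes aligned pointers and a whole number of words):

    #include <stddef.h>
    #include <stdint.h>

    /* When dst overlaps src from above, copy from the end downward so
     * source words are read before they are overwritten. */
    static void copy_words_backward(uint32_t *dst, const uint32_t *src,
                                    size_t nwords)
    {
            dst += nwords;
            src += nwords;
            while (nwords--)
                    *--dst = *--src;
    }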
|
/Linux-v4.19/arch/m68k/fpsp040/ |
D | slogn.S |
    436  |--LET V=U*U, W=V*V, CALCULATE
    438  |--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
    443  fmulx   %fp1,%fp1       | ...FP1 IS W
    448  fmulx   %fp1,%fp3       | ...W*B5
    449  fmulx   %fp1,%fp2       | ...W*B4
    451  faddd   LOGB3,%fp3      | ...B3+W*B5
    452  faddd   LOGB2,%fp2      | ...B2+W*B4
    454  fmulx   %fp3,%fp1       | ...W*(B3+W*B5), FP3 RELEASED
    456  fmulx   %fp0,%fp2       | ...V*(B2+W*B4)
    458  faddd   LOGB1,%fp1      | ...B1+W*(B3+W*B5)
    [all …]
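The m68k log routine evaluates its polynomial tail with an even/odd split: with V = U*U and W = V*V, the two bracketed sub-expressions have no data dependence on each other, shortening the FPU dependency chain versus plain Horner evaluation. A C transcription of the formula at line 438 (coefficients are placeholders, not the 68040 constants):

    /* U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
     * expands to U + B1*U*V + B2*U*V^2 + ... + B5*U*V^5. */
    static double log_poly_tail(double U, double B1, double B2,
                                double B3, double B4, double B5)
    {
            double V = U * U;
            double W = V * V;
            return U + U * V * ((B1 + W * (B3 + W * B5))
                              + (V * (B2 + W * B4)));
    }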
|
/Linux-v4.19/tools/memory-model/Documentation/ |
D | cheatsheet.txt |
    3   C  Self  R  W  RMW  Self  R  W  DR  DW  RMW  SV
    11  Successful *_release()  C  Y  Y  Y  W  Y
    13  smp_wmb()  Y  W  Y  Y  W
    23  W: Write, for example, WRITE_ONCE(), or write portion of RMW
|
/Linux-v4.19/arch/arm/boot/compressed/ |
D | head.S |
    140  W(b)    1f
    916  W(b)    __armv4_mmu_cache_on
    917  W(b)    __armv4_mmu_cache_off
    923  W(b)    __armv3_mpu_cache_on
    924  W(b)    __armv3_mpu_cache_off
    925  W(b)    __armv3_mpu_cache_flush
    929  W(b)    __armv4_mpu_cache_on
    930  W(b)    __armv4_mpu_cache_off
    931  W(b)    __armv4_mpu_cache_flush
    935  W(b)    __arm926ejs_mmu_cache_on
    [all …]
|
/Linux-v4.19/arch/x86/kernel/kprobes/ |
D | core.c |
    74  #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\    macro
    90  W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
    91  W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
    92  W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
    93  W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
    94  W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
    95  W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
    96  W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
    97  W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
    98  W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
    [all …]
|
/Linux-v4.19/drivers/gpu/drm/i915/selftests/ |
D | igt_wedge_me.h |
    53  #define igt_wedge_on_timeout(W, DEV, TIMEOUT) \    argument
    54      for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \
    55           (W)->i915; \
    56           __igt_fini_wedge((W)))
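igt_wedge_on_timeout() is the for-loop scope-guard idiom: setup runs in the init clause, the body runs while (W)->i915 is non-NULL, and teardown runs in the increment clause. A generic sketch of the same pattern (the guard struct and helpers below are invented for illustration):

    #include <stdio.h>

    struct guard { int active; };

    static void guard_init(struct guard *g) { g->active = 1; }
    static void guard_fini(struct guard *g) { g->active = 0; }

    /* Body executes exactly once: init sets the flag, fini clears it
     * after the body, and the cleared flag ends the loop. */
    #define with_guard(g) \
            for (guard_init(g); (g)->active; guard_fini(g))

    int main(void)
    {
            struct guard g;

            with_guard(&g)
                    printf("body runs with the guard held\n");
            return 0;    /* guard_fini() has already run here */
    }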
|
/Linux-v4.19/arch/arm/kvm/ |
D | init.S |
    56  W(b)    .
    57  W(b)    .
    58  W(b)    .
    59  W(b)    .
    60  W(b)    .
    61  W(b)    __do_hyp_init
    62  W(b)    .
    63  W(b)    .
|
/Linux-v4.19/ |
D | CREDITS |
    5    (W), PGP key ID and fingerprint (P), description (D), and
    29   W: http://www.arbornet.org/~dragos
    37   W: http://alumnus.caltech.edu/~madler/
    49   W: http://www.csn.ul.ie/~airlied
    57   W: http://www.moses.uklinux.net/patches
    65   W: http://www.almesberger.net/
    80   W: http://www-stu.christs.cam.ac.uk/~aia21/
    88   W: http://www.pdos.lcs.mit.edu/~cananian
    99   W: http://www.codepoet.org/
    109  W: http://www.subcarrier.org/mang
    [all …]
|
/Linux-v4.19/arch/arm64/ |
D | Kconfig.debug |
    43  bool "Warn on W+X mappings at boot"
    46    Generate a warning if any W+X mappings are found at boot.
    49    W+X mappings after applying NX, as such mappings are a security risk.
    55      arm64/mm: Checked W+X mappings: passed, no W+X pages found.
    59      arm64/mm: Checked W+X mappings: FAILED, <N> W+X pages found.
    62    still fine, as W+X mappings are not a security hole in
|