Searched refs: W (Results 1 – 25 of 294) sorted by relevance

/Linux-v5.4/arch/powerpc/crypto/
sha1-powerpc-asm.S
34 #define W(t) (((t)%16)+16) macro
37 LWZ(W(t),(t)*4,r4)
46 add r14,r0,W(t); \
47 LWZ(W((t)+4),((t)+4)*4,r4); \
58 xor r5,W((t)+4-3),W((t)+4-8); \
60 xor W((t)+4),W((t)+4-16),W((t)+4-14); \
61 add r0,r0,W(t); \
62 xor W((t)+4),W((t)+4),r5; \
64 rotlwi W((t)+4),W((t)+4),1
73 add r0,r0,W(t); \
[all …]
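
The W(t) macro above maps schedule index t onto a rotating window of 16 registers
(((t)%16)+16, i.e. r16..r31), and the xor/rotlwi sequence is the SHA-1 message-schedule
recurrence from FIPS 180-4. A minimal plain-C sketch of that recurrence, not the powerpc
code itself (rotl32() and the 80-entry array are illustrative):

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, int n)
{
	return (x << n) | (x >> (32 - n));
}

/* W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) for t = 16..79 */
static void sha1_schedule(uint32_t W[80])
{
	for (int t = 16; t < 80; t++)
		W[t] = rotl32(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
}
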
/Linux-v5.4/lib/crypto/
sha256.c
36 static inline void LOAD_OP(int I, u32 *W, const u8 *input) in LOAD_OP() argument
38 W[I] = get_unaligned_be32((__u32 *)input + I); in LOAD_OP()
41 static inline void BLEND_OP(int I, u32 *W) in BLEND_OP() argument
43 W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; in BLEND_OP()
49 u32 W[64]; in sha256_transform() local
54 LOAD_OP(i, W, input); in sha256_transform()
58 BLEND_OP(i, W); in sha256_transform()
65 t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0]; in sha256_transform()
67 t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1]; in sha256_transform()
69 t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2]; in sha256_transform()
[all …]
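
LOAD_OP() and BLEND_OP() above load the 16 big-endian input words and extend them to the
64-entry SHA-256 message schedule. A self-contained C sketch of the same expansion (the
ror32/sig helper names are chosen here for illustration; the rotation and shift amounts
are the FIPS 180-4 small sigmas):

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, int n)
{
	return (x >> n) | (x << (32 - n));
}

static inline uint32_t sig0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); }
static inline uint32_t sig1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

/* Expand W[0..15] (already loaded big-endian) to the full 64-word schedule. */
static void sha256_schedule(uint32_t W[64])
{
	for (int i = 16; i < 64; i++)
		W[i] = sig1(W[i-2]) + W[i-7] + sig0(W[i-15]) + W[i-16];
}
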
/Linux-v5.4/arch/arm/kvm/hyp/
hyp-entry.S
54 W(b) hyp_reset
55 W(b) hyp_undef
56 W(b) hyp_svc
57 W(b) hyp_pabt
58 W(b) hyp_dabt
59 W(b) hyp_hvc
60 W(b) hyp_irq
61 W(b) hyp_fiq
72 W(add) sp, sp, #1 /* Reset 7 */
73 W(add) sp, sp, #1 /* Undef 6 */
[all …]
/Linux-v5.4/arch/x86/kernel/
uprobes.c
46 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ macro
89 W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
90 W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
91 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
92 W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
93 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
94 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
95 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
96 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
97 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
[all …]
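
The W(row, b0..bf) macro above packs sixteen 0/1 flags into one 16-bit group so that each
row of the table reads like the opcode map it describes; OR-ing two rows builds each 32-bit
word of a 256-bit "instruction is good" bitmap. A simplified, self-contained re-creation of
the idea (ROW() and opcode_allowed() are invented here, not the kernel's macro or lookup):

#include <stdint.h>
#include <stdbool.h>

#define ROW(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)	\
	((uint32_t)((b0)       | (b1) << 1  | (b2) << 2  | (b3) << 3  |		\
		    (b4) << 4  | (b5) << 5  | (b6) << 6  | (b7) << 7  |		\
		    (b8) << 8  | (b9) << 9  | (ba) << 10 | (bb) << 11 |		\
		    (bc) << 12 | (bd) << 13 | (be) << 14 | (bf) << 15)		\
	 << ((row) % 32))

/* map[] holds 8 x 32 = 256 flag bits, one per opcode byte. */
static bool opcode_allowed(const uint32_t map[8], uint8_t opcode)
{
	return (map[opcode / 32] >> (opcode % 32)) & 1;
}
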
/Linux-v5.4/arch/x86/crypto/
sha1_ssse3_asm.S
311 .set W, W0 define
319 .set W_minus_32, W
330 .set W_minus_04, W
331 .set W, W_minus_32 define
352 movdqa W_TMP1, W
374 movdqa W_minus_12, W
375 palignr $8, W_minus_16, W # w[i-14]
378 pxor W_minus_08, W
381 pxor W_TMP1, W
382 movdqa W, W_TMP2
[all …]
sha512-ssse3-asm.S
102 # W[t]+K[t] (stack frame)
129 add WK_2(idx), T1 # W[t] + K[t] from message scheduler
133 add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
135 add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
158 # Two rounds are computed based on the values for K[t-2]+W[t-2] and
159 # K[t-1]+W[t-1] which were previously stored at WK_2 by the message
166 # Eg. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]}
175 movdqa W_t(idx), %xmm2 # XMM2 = W[t-2]
178 movdqa %xmm2, %xmm0 # XMM0 = W[t-2]
183 movdqu W_t(idx), %xmm5 # XMM5 = W[t-15]
[all …]
sha512-avx-asm.S
78 # W[t] + K[t] | W[t+1] + K[t+1]
105 # W[t]+K[t] (stack frame)
136 add WK_2(idx), T1 # W[t] + K[t] from message scheduler
140 add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
142 add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
164 # Two rounds are computed based on the values for K[t-2]+W[t-2] and
165 # K[t-1]+W[t-1] which were previously stored at WK_2 by the message
172 # Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]}
177 vmovdqa W_t(idx), %xmm4 # XMM4 = W[t-2]
179 vmovdqu W_t(idx), %xmm5 # XMM5 = W[t-15]
[all …]
sha512-avx2-asm.S
171 MY_VPALIGNR YTMP0, Y_3, Y_2, 8 # YTMP0 = W[-7]
173 vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16]
175 MY_VPALIGNR YTMP1, Y_1, Y_0, 8 # YTMP1 = W[-15]
182 vpor YTMP2, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1
184 vpsrlq $7, YTMP1, YTMP4 # YTMP4 = W[-15] >> 7
228 vpor YTMP2, YTMP1, YTMP1 # YTMP1 = W[-15] ror 8
230 vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
235 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0
237 vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA}
239 vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00}
[all …]
sha256-ssse3-asm.S
149 ## compute W[-16] + W[-7] 4 at a time
154 palignr $4, X2, XTMP0 # XTMP0 = W[-7]
162 paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
167 palignr $4, X0, XTMP1 # XTMP1 = W[-15]
171 movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
175 movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
186 por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
191 movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
194 movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
211 psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
[all …]
sha256-avx-asm.S
156 ## compute W[-16] + W[-7] 4 at a time
161 vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
168 vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
173 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
190 vpor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7
204 vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
216 vpxor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR
225 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
228 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
240 vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
[all …]
/Linux-v5.4/tools/bpf/bpftool/bash-completion/
bpftool
22 COMPREPLY+=( $( compgen -W "$w" -- "$cur" ) )
44 COMPREPLY+=( $( compgen -W "$*" -- "$cur" ) )
49 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
57 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
64 COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
70 COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
76 COMPREPLY+=( $( compgen -W "$( bpftool -jp btf 2>&1 | \
89 COMPREPLY+=( $( compgen -W "$maps" -- "$cur" ) )
100 COMPREPLY+=( $( compgen -W "$(seq 0 $((nmaps - 1)))" -- "$cur" ) )
105 COMPREPLY+=( $( compgen -W "$( ls /sys/class/net 2>/dev/null )" -- \
[all …]
/Linux-v5.4/crypto/
sha512_generic.c
88 static inline void LOAD_OP(int I, u64 *W, const u8 *input) in LOAD_OP() argument
90 W[I] = get_unaligned_be64((__u64 *)input + I); in LOAD_OP()
93 static inline void BLEND_OP(int I, u64 *W) in BLEND_OP() argument
95 W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]); in BLEND_OP()
104 u64 W[16]; in sha512_transform() local
118 LOAD_OP(i + j, W, input); in sha512_transform()
121 BLEND_OP(i + j, W); in sha512_transform()
126 t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)]; in sha512_transform()
128 t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1]; in sha512_transform()
130 t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2]; in sha512_transform()
[all …]
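
Unlike the lib/crypto/sha256.c code earlier, sha512_generic.c keeps only a 16-word window
and extends the schedule in place: BLEND_OP() overwrites slot I & 15 once W[I-16] is no
longer needed. A plain-C sketch of that circular update (helper names are illustrative;
the rotation and shift amounts are the SHA-512 small sigmas from FIPS 180-4):

#include <stdint.h>

static inline uint64_t ror64(uint64_t x, int n)
{
	return (x >> n) | (x << (64 - n));
}

static inline uint64_t s0(uint64_t x) { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); }
static inline uint64_t s1(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }

/* Extend the schedule one word at a time inside a 16-entry ring buffer. */
static void sha512_blend(uint64_t W[16], int I)
{
	W[I & 15] += s1(W[(I - 2) & 15]) + W[(I - 7) & 15] + s0(W[(I - 15) & 15]);
}
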
/Linux-v5.4/arch/arm/crypto/
sha1-armv7-neon.S
88 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ argument
90 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
94 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
98 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
102 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ argument
104 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
108 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
111 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
115 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ argument
117 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
[all …]
/Linux-v5.4/tools/memory-model/
linux-kernel.def
34 cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
35 cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
36 cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
37 cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
108 atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
109 atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
110 atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
111 atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
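
Each cmpxchg() flavour above maps to the same __cmpxchg primitive, differing only in its
ordering annotation (mb, once, acquire, release). As a rough userspace analogue of why the
acquire/release pair exists (C11 atomics, not the kernel API; owner, try_claim() and
release_claim() are invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int owner;

static bool try_claim(int me)
{
	int expected = 0;
	/* acquire on success: the protected work cannot be reordered before the claim */
	return atomic_compare_exchange_strong_explicit(&owner, &expected, me,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void release_claim(int me)
{
	int expected = me;
	/* release: earlier stores become visible before ownership is dropped */
	atomic_compare_exchange_strong_explicit(&owner, &expected, 0,
						memory_order_release,
						memory_order_relaxed);
}
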
/Linux-v5.4/arch/arm/kernel/
hyp-stub.S
116 W(adr) r7, __hyp_stub_vectors
263 __hyp_stub_reset: W(b) .
264 __hyp_stub_und: W(b) .
265 __hyp_stub_svc: W(b) .
266 __hyp_stub_pabort: W(b) .
267 __hyp_stub_dabort: W(b) .
268 __hyp_stub_trap: W(b) __hyp_stub_do_trap
269 __hyp_stub_irq: W(b) .
270 __hyp_stub_fiq: W(b) .
/Linux-v5.4/arch/arm/lib/
memmove.S
85 6: W(nop)
86 W(ldr) r3, [r1, #-4]!
87 W(ldr) r4, [r1, #-4]!
88 W(ldr) r5, [r1, #-4]!
89 W(ldr) r6, [r1, #-4]!
90 W(ldr) r7, [r1, #-4]!
91 W(ldr) r8, [r1, #-4]!
92 W(ldr) lr, [r1, #-4]!
96 W(nop)
97 W(str) r3, [r0, #-4]!
[all …]
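
The W(ldr)/W(str) pairs above belong to memmove()'s unrolled backward copy: when the
destination overlaps the source from above, copying from the end down avoids clobbering
bytes that have not been read yet. A byte-wise C sketch of the same direction choice
(my_memmove() is an illustration, not the ARM assembly):

#include <stddef.h>

static void *my_memmove(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (d <= s) {
		while (n--)
			*d++ = *s++;	/* no overlap hazard: copy forward */
	} else {
		d += n;
		s += n;
		while (n--)
			*--d = *--s;	/* overlapping from above: copy high-to-low */
	}
	return dst;
}
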
/Linux-v5.4/arch/m68k/fpsp040/
slogn.S
436 |--LET V=U*U, W=V*V, CALCULATE
438 |--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
443 fmulx %fp1,%fp1 | ...FP1 IS W
448 fmulx %fp1,%fp3 | ...W*B5
449 fmulx %fp1,%fp2 | ...W*B4
451 faddd LOGB3,%fp3 | ...B3+W*B5
452 faddd LOGB2,%fp2 | ...B2+W*B4
454 fmulx %fp3,%fp1 | ...W*(B3+W*B5), FP3 RELEASED
456 fmulx %fp0,%fp2 | ...V*(B2+W*B4)
458 faddd LOGB1,%fp1 | ...B1+W*(B3+W*B5)
[all …]
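
The comments above spell out the evaluation scheme for the log() polynomial tail: with
V = U*U and W = V*V, the odd and even coefficient halves are evaluated separately and only
recombined at the end, so the two halves can be computed independently. The same grouping
in plain C (B1..B5 and u are placeholders; only the evaluation order is the point):

/* U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] ) */
static double log_poly_tail(double u, const double b[5])
{
	double v = u * u;
	double w = v * v;
	double odd  = b[0] + w * (b[2] + w * b[4]);	/* B1 + W*(B3 + W*B5) */
	double even = v * (b[1] + w * b[3]);		/* V*(B2 + W*B4) */

	return u + u * v * (odd + even);
}
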
/Linux-v5.4/tools/memory-model/Documentation/
cheatsheet.txt
3 C Self R W RMW Self R W DR DW RMW SV
11 Successful *_release() C Y Y Y W Y
13 smp_wmb() Y W Y Y W
23 W: Write, for example, WRITE_ONCE(), or write portion of RMW
/Linux-v5.4/arch/x86/kernel/kprobes/
core.c
61 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ macro
77 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
78 W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
79 W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
80 W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
81 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
82 W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
83 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
84 W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
85 W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
[all …]
/Linux-v5.4/arch/arm/boot/compressed/
head.S
178 W(b) 1f
968 W(b) __armv4_mmu_cache_on
969 W(b) __armv4_mmu_cache_off
975 W(b) __armv3_mpu_cache_on
976 W(b) __armv3_mpu_cache_off
977 W(b) __armv3_mpu_cache_flush
981 W(b) __armv4_mpu_cache_on
982 W(b) __armv4_mpu_cache_off
983 W(b) __armv4_mpu_cache_flush
987 W(b) __arm926ejs_mmu_cache_on
[all …]
/Linux-v5.4/arch/arm/kvm/
init.S
44 W(b) .
45 W(b) .
46 W(b) .
47 W(b) .
48 W(b) .
49 W(b) __do_hyp_init
50 W(b) .
51 W(b) .
/Linux-v5.4/drivers/gpu/drm/i915/gt/
intel_reset.h
64 #define intel_wedge_on_timeout(W, GT, TIMEOUT) \ argument
65 for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \
66 (W)->gt; \
67 __intel_fini_wedge((W)))
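
intel_wedge_on_timeout() above is a "for-scope" macro: the for() header runs an init helper
once, the condition keeps the attached block alive while the context is valid, and the
increment clause runs the cleanup helper when the block is left. A minimal sketch of the
idiom (struct guard, begin_guard() and end_guard() are invented here, not the i915 helpers):

#include <stdio.h>

struct guard { int active; };

static void begin_guard(struct guard *g) { g->active = 1; puts("begin"); }
static void end_guard(struct guard *g)   { g->active = 0; puts("end"); }

/* Runs the attached statement exactly once, bracketed by begin/end. */
#define scoped_guard(g) \
	for (begin_guard(g); (g)->active; end_guard(g))

int main(void)
{
	struct guard g;

	scoped_guard(&g)
		puts("inside guarded scope");
	return 0;
}
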
/Linux-v5.4/
CREDITS
5 (W), PGP key ID and fingerprint (P), description (D), and
29 W: http://www.arbornet.org/~dragos
37 W: http://alumnus.caltech.edu/~madler/
49 W: http://www.csn.ul.ie/~airlied
57 W: http://www.moses.uklinux.net/patches
65 W: http://www.almesberger.net/
80 W: http://www-stu.christs.cam.ac.uk/~aia21/
88 W: http://www.pdos.lcs.mit.edu/~cananian
99 W: http://www.codepoet.org/
109 W: http://www.subcarrier.org/mang
[all …]
/Linux-v5.4/drivers/atm/
Kconfig
86 when going from 8W to 16W bursts.
89 bool "Enable 16W TX bursts (discouraged)"
96 bool "Enable 8W TX bursts (recommended)"
103 bool "Enable 4W TX bursts (optional)"
107 this if you have disabled 8W bursts. Enabling 4W if 8W is also set
111 bool "Enable 2W TX bursts (optional)"
115 this if you have disabled 4W and 8W bursts. Enabling 2W if 4W or 8W
119 bool "Enable 16W RX bursts (discouraged)"
126 bool "Enable 8W RX bursts (discouraged)"
134 bool "Enable 4W RX bursts (recommended)"
[all …]
/Linux-v5.4/arch/arm64/
Kconfig.debug
44 bool "Warn on W+X mappings at boot"
47 Generate a warning if any W+X mappings are found at boot.
50 W+X mappings after applying NX, as such mappings are a security risk.
56 arm64/mm: Checked W+X mappings: passed, no W+X pages found.
60 arm64/mm: Checked W+X mappings: FAILED, <N> W+X pages found.
63 still fine, as W+X mappings are not a security hole in
