/Linux-v6.1/arch/powerpc/include/asm/ |
D | kvm_fpu.h | 14 extern void fps_fres(u64 *fpscr, u32 *dst, u32 *src1); 15 extern void fps_frsqrte(u64 *fpscr, u32 *dst, u32 *src1); 16 extern void fps_fsqrts(u64 *fpscr, u32 *dst, u32 *src1); 18 extern void fps_fadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2); 19 extern void fps_fdivs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2); 20 extern void fps_fmuls(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2); 21 extern void fps_fsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2); 23 extern void fps_fmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2, 25 extern void fps_fmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2, 27 extern void fps_fnmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2, [all …]
|
/Linux-v6.1/drivers/crypto/caam/ |
D | desc_constr.h | 415 #define append_math_add(desc, dest, src0, src1, len) \ argument 416 APPEND_MATH(ADD, desc, dest, src0, src1, len) 417 #define append_math_sub(desc, dest, src0, src1, len) \ argument 418 APPEND_MATH(SUB, desc, dest, src0, src1, len) 419 #define append_math_add_c(desc, dest, src0, src1, len) \ argument 420 APPEND_MATH(ADDC, desc, dest, src0, src1, len) 421 #define append_math_sub_b(desc, dest, src0, src1, len) \ argument 422 APPEND_MATH(SUBB, desc, dest, src0, src1, len) 423 #define append_math_and(desc, dest, src0, src1, len) \ argument 424 APPEND_MATH(AND, desc, dest, src0, src1, len) [all …]
|
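The desc_constr.h wrappers above all forward to a shared APPEND_MATH() macro, pasting the operation name (ADD, SUB, ADDC, SUBB, AND, ...) into a command constant. Below is a sketch of that token-pasting dispatch pattern with an invented command encoding and a stand-in builder function; it is not the real CAAM descriptor API.

```c
#include <stdint.h>

/* Hypothetical operation codes; the real ones are CAAM command constants. */
#define MATH_OP_ADD	0x01u
#define MATH_OP_SUB	0x02u
#define MATH_OP_AND	0x03u

/*
 * Stand-in for the real descriptor builder: append one command word to the
 * descriptor buffer.  The packing here is invented purely so the dispatch
 * pattern below compiles; it is not the CAAM command format.
 */
static void append_math_sketch(uint32_t *desc, unsigned int *pos, uint32_t op,
			       uint32_t dest, uint32_t src0, uint32_t src1,
			       uint32_t len)
{
	desc[(*pos)++] = (op << 24) | (dest << 16) | (src0 << 12) |
			 (src1 << 8) | len;
}

/*
 * The dispatch pattern from desc_constr.h: each append_math_<op>() wrapper
 * pastes its operation name into a shared macro and forwards its operands.
 */
#define APPEND_MATH_SKETCH(op, desc, pos, dest, src0, src1, len) \
	append_math_sketch((desc), (pos), MATH_OP_##op, (dest), (src0), \
			   (src1), (len))

#define append_math_add_sketch(desc, pos, dest, src0, src1, len) \
	APPEND_MATH_SKETCH(ADD, desc, pos, dest, src0, src1, len)
#define append_math_sub_sketch(desc, pos, dest, src0, src1, len) \
	APPEND_MATH_SKETCH(SUB, desc, pos, dest, src0, src1, len)
```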
/Linux-v6.1/arch/arm64/lib/ |
D | memcmp.S | 20 #define src1 x0 macro 39 ldr data1, [src1], 8 47 ldr data1, [src1, limit] 52 ldr data1, [src1], 8 62 /* We overlap loads between 0-32 bytes at either side of SRC1 when we 67 /* Align src1 and adjust src2 with bytes not yet done. */ 68 and tmp1, src1, 15 70 sub src1, src1, tmp1 73 /* Loop performing 16 bytes per iteration using aligned src1. 78 ldp data1, data1h, [src1], 16 [all …]
|
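The memcmp.S fragment above aligns src1 and then compares 8 bytes per iteration, with overlapping loads around the edges. A rough portable C rendering of the same idea (align one pointer, compare a word at a time, fall back to byte compares to locate the difference) is sketched below; it is illustrative only, not the arm64 implementation.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Illustrative word-at-a-time memcmp: align src1 (p1), then compare
 * 8 bytes per iteration.  The real routine uses overlapping unaligned
 * loads and the condition flags instead of the byte fallback below.
 */
static int memcmp_sketch(const void *s1, const void *s2, size_t n)
{
	const unsigned char *p1 = s1, *p2 = s2;
	uint64_t a, b;

	/* Byte compares until src1 (p1) reaches 8-byte alignment. */
	while (n && ((uintptr_t)p1 & 7)) {
		if (*p1 != *p2)
			return *p1 - *p2;
		p1++; p2++; n--;
	}

	/* Main loop: 8 bytes per iteration (memcpy keeps the p2 loads legal). */
	while (n >= 8) {
		memcpy(&a, p1, 8);
		memcpy(&b, p2, 8);
		if (a != b)
			break;		/* differing byte located below */
		p1 += 8; p2 += 8; n -= 8;
	}

	/* Tail, or the word that differed: plain byte compares. */
	while (n) {
		if (*p1 != *p2)
			return *p1 - *p2;
		p1++; p2++; n--;
	}
	return 0;
}
```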
D | strcmp.S | 23 #define src1 x0 macro 57 sub off2, src2, src1 59 and tmp, src1, 7 67 ldr data2, [src1, off2] 68 ldr data1, [src1], 8 111 bic src1, src1, 7 112 ldr data2, [src1, off2] 113 ldr data1, [src1], 8 122 /* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always 126 ldrb data1w, [src1], 1 [all …]
|
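strcmp.S computes off2 = src2 - src1 once, so the loop advances only src1 and loads the second string from [src1 + off2], checking a whole word per step for a mismatch or a terminating NUL. The sketch below shows the word-at-a-time NUL-detection idea in C for the simple case where both pointers are 8-byte aligned; like the real assembly it can read a few bytes past the terminator within an aligned word, which is fine for aligned hardware loads but makes this only a sketch in portable C.

```c
#include <stdint.h>
#include <string.h>

/* True if any byte of x is zero (the usual "has-zero" bit trick). */
static int word_has_zero(uint64_t x)
{
	return ((x - 0x0101010101010101ULL) & ~x &
		0x8080808080808080ULL) != 0;
}

/*
 * Word-at-a-time strcmp sketch: take the 8-byte fast path only while both
 * pointers are aligned, and finish byte by byte as soon as the words
 * differ or may contain the terminating NUL.  Not the arm64 code.
 */
static int strcmp_sketch(const char *s1, const char *s2)
{
	while ((((uintptr_t)s1 | (uintptr_t)s2) & 7) == 0) {
		uint64_t a, b;

		memcpy(&a, s1, 8);
		memcpy(&b, s2, 8);
		if (a != b || word_has_zero(a))
			break;			/* finish byte by byte */
		s1 += 8;
		s2 += 8;
	}
	while (*s1 && *s1 == *s2) {
		s1++;
		s2++;
	}
	return (unsigned char)*s1 - (unsigned char)*s2;
}
```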
D | strncmp.S | 24 #define src1 x0 macro 63 eor tmp1, src1, src2 66 and count, src1, #7 75 ldr data1, [src1], #8 164 bic src1, src1, #7 166 ldr data1, [src1], #8 186 ldrb data1w, [src1], #1 195 /* Align the SRC1 to a dword by doing a bytewise compare and then do 205 ldrb data1w, [src1], #1 217 src1 | a a a a a a a a | b b b c c c c c | . . . [all …]
|
/Linux-v6.1/lib/crypto/ |
D | utils.c | 13 * XOR @len bytes from @src1 and @src2 together, writing the result to @dst 17 void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) in __crypto_xor() argument 23 int d = (((unsigned long)dst ^ (unsigned long)src1) | in __crypto_xor() 36 *dst++ = *src1++ ^ *src2++; in __crypto_xor() 43 u64 l = get_unaligned((u64 *)src1) ^ in __crypto_xor() 47 *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; in __crypto_xor() 50 src1 += 8; in __crypto_xor() 57 u32 l = get_unaligned((u32 *)src1) ^ in __crypto_xor() 61 *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; in __crypto_xor() 64 src1 += 4; in __crypto_xor() [all …]
|
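The utils.c hit documents __crypto_xor(): XOR @len bytes of @src1 and @src2 into @dst, choosing 64/32/16-bit chunks depending on the relative alignment of the three pointers and on whether the architecture handles unaligned accesses cheaply. A minimal stand-in with the same observable behaviour, using alignment-safe memcpy chunks plus a byte tail, is sketched below; it is not the kernel implementation.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Minimal sketch of the __crypto_xor() semantics: dst[i] = src1[i] ^ src2[i]
 * for len bytes.  The kernel version picks its chunk size from the pointer
 * alignment; this version just does 8-byte chunks via memcpy plus a tail.
 */
static void crypto_xor_sketch(uint8_t *dst, const uint8_t *src1,
			      const uint8_t *src2, size_t len)
{
	while (len >= 8) {
		uint64_t a, b;

		memcpy(&a, src1, 8);
		memcpy(&b, src2, 8);
		a ^= b;
		memcpy(dst, &a, 8);
		dst += 8;
		src1 += 8;
		src2 += 8;
		len -= 8;
	}
	while (len--)
		*dst++ = *src1++ ^ *src2++;
}
```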
/Linux-v6.1/include/linux/ |
D | bitmap.h | 43 * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 44 * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 45 * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 46 * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) 48 * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? 49 * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? 50 * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? 54 * bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap 140 bool __pure __bitmap_or_equal(const unsigned long *src1, 320 static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1, in bitmap_and() argument [all …]
|
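The bitmap.h comment block lists the dst/src1/src2 operations over nbits-wide bitmaps stored as arrays of unsigned long. As a rough illustration of the documented bitmap_and() behaviour (*dst = *src1 & *src2, returning whether the result is non-empty), the sketch below does the word-wise AND with a last-word mask; the in-kernel helper adds a single-word fast path and other optimisations.

```c
#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
/* Mask of the valid bits in the last word of an nbits-wide bitmap. */
#define LAST_WORD_MASK(nbits)	(~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

/*
 * Sketch of bitmap_and(): dst = src1 & src2 over nbits bits; returns true
 * if any bit of the result is set.  Bits beyond nbits in the final word
 * are cleared via the last-word mask.
 */
static bool bitmap_and_sketch(unsigned long *dst, const unsigned long *src1,
			      const unsigned long *src2, unsigned int nbits)
{
	unsigned int i, words = BITS_TO_LONGS(nbits);
	unsigned long any = 0;

	for (i = 0; i < words; i++) {
		unsigned long w = src1[i] & src2[i];

		if (i == words - 1)
			w &= LAST_WORD_MASK(nbits);
		dst[i] = w;
		any |= w;
	}
	return any != 0;
}
```

The linkmode, nodemask and physid hits later in these results are thin wrappers around exactly these bitmap helpers, each passing its own fixed bit count (__ETHTOOL_LINK_MODE_MASK_NBITS, MAX_NUMNODES, MAX_LOCAL_APIC).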
D | linkmode.h | 35 static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1, in linkmode_andnot() argument 38 return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); in linkmode_andnot() 74 static inline int linkmode_equal(const unsigned long *src1, in linkmode_equal() argument 77 return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); in linkmode_equal() 80 static inline int linkmode_intersects(const unsigned long *src1, in linkmode_intersects() argument 83 return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); in linkmode_intersects() 86 static inline int linkmode_subset(const unsigned long *src1, in linkmode_subset() argument 89 return bitmap_subset(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); in linkmode_subset()
|
D | nodemask.h | 29 * void nodes_and(dst, src1, src2) dst = src1 & src2 [intersection] 30 * void nodes_or(dst, src1, src2) dst = src1 | src2 [union] 31 * void nodes_xor(dst, src1, src2) dst = src1 ^ src2 32 * void nodes_andnot(dst, src1, src2) dst = src1 & ~src2 162 #define nodes_and(dst, src1, src2) \ argument 163 __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES) 170 #define nodes_or(dst, src1, src2) \ argument 171 __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES) 178 #define nodes_xor(dst, src1, src2) \ argument 179 __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES) [all …]
|
/Linux-v6.1/arch/ia64/lib/ |
D | copy_user.S | 66 #define src1 r24 macro 103 mov src1=src // copy because of rotation 115 EX(.failure_in_pipe1,(p16) ld1 val1[0]=[src1],1) 130 and src2=0x7,src1 // src offset 134 // that we can reach 8-byte alignment for both src1 and dst1. 155 // We know src1 is not 8-byte aligned in this case. 197 EX(.failure_in_pipe2,(p16) ld1 val1[0]=[src1],1) 212 (p14) sub src1=src1,t2 213 (p15) sub src1=src1,t1 215 // Now both src1 and dst1 point to an 8-byte aligned address. And [all …]
|
D | memcpy_mck.S | 34 #define src1 r3 macro 146 add src1=8,src0 // 2nd src pointer 158 EK(.ex_handler, (p16) ld8 r38=[src1],16) 163 EK(.ex_handler, (p16) ld8 r36=[src1],16) 173 EX(.ex_hndlr_s, (p7) ld8 t2=[src1],8) 177 EX(.ex_hndlr_s, (p8) ld8 t3=[src1]) 206 add src1 = 3*8, src_pre_mem // first t3 src 224 EK(.ex_handler, (p[D]) ld8 t4 = [src1], 3*8) // M1 234 EK(.ex_handler, (p[D]) ld8 t7 = [src1], 3*8) 239 EK(.ex_handler, (p[D]) ld8 t10 = [src1], 8) [all …]
|
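memcpy_mck.S sets up a second source pointer, src1 = src0 + 8, so each unrolled iteration can issue two independent 8-byte loads (and two stores) per step. Stripped of the ia64 software pipelining and prefetch, the pointer layout looks roughly like the sketch below; it is purely illustrative.

```c
#include <stddef.h>
#include <string.h>

/*
 * Sketch of the two-pointer unrolling in memcpy_mck.S: src0/src1 (and
 * dst0/dst1) start 8 bytes apart and both step by 16, so every iteration
 * moves 16 bytes with two independent load/store pairs.  The real routine
 * is rotated-register ia64 assembly.
 */
static void copy_two_pointers_sketch(void *dst, const void *src, size_t len)
{
	unsigned char *dst0 = dst, *dst1 = dst0 + 8;
	const unsigned char *src0 = src, *src1 = src0 + 8;

	while (len >= 16) {
		memcpy(dst0, src0, 8);
		memcpy(dst1, src1, 8);
		dst0 += 16; dst1 += 16;
		src0 += 16; src1 += 16;
		len -= 16;
	}
	if (len)
		memcpy(dst0, src0, len);	/* remaining tail */
}
```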
D | copy_page.S | 29 #define src1 r20 macro 59 mov src1=in1 71 (p[0]) ld8 t1[0]=[src1],16 77 (p[0]) ld8 t3[0]=[src1],16 82 (p[0]) ld8 t5[0]=[src1],16 87 (p[0]) ld8 t7[0]=[src1],16
|
D | copy_page_mck.S | 70 #define src1 r3 macro 125 add src1 = 3*8, in1 // first t3 src 144 (p[D]) ld8 t4 = [src1], 3*8 // M1 154 (p[D]) ld8 t7 = [src1], 3*8 159 (p[D]) ld8 t10 = [src1], 8 164 (p[D]) ld8 t11 = [src1], 3*8 169 (p[D]) ld8 t14 = [src1], 8 174 (p[D]) ld8 t15 = [src1], 4*8 179 (p[D-1])ld8 t3 = [src1], 8
|
/Linux-v6.1/tools/include/linux/ |
D | bitmap.h | 71 static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, in bitmap_or() argument 75 *dst = *src1 | *src2; in bitmap_or() 77 __bitmap_or(dst, src1, src2, nbits); in bitmap_or() 145 * @src1: operand 1 149 static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1, in bitmap_and() argument 153 return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0; in bitmap_and() 154 return __bitmap_and(dst, src1, src2, nbits); in bitmap_and() 165 static inline bool bitmap_equal(const unsigned long *src1, in bitmap_equal() argument 169 return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); in bitmap_equal() 172 return !memcmp(src1, src2, nbits / 8); in bitmap_equal() [all …]
|
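tools/include/linux/bitmap.h mirrors the kernel API with inline fast paths: when nbits is a compile-time constant that fits in one long, the whole operation collapses to a single masked word, and bitmap_equal() can fall back to memcmp() when nbits is a byte multiple. A rough sketch of the equality check (nbits > 0 assumed) follows; it is not the in-tree code.

```c
#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define LAST_WORD_MASK(nbits)	(~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

/*
 * Sketch of bitmap_equal(): one masked compare when the bitmap fits in a
 * single long, otherwise whole words followed by a masked last word.
 * Assumes nbits > 0; the in-tree version also special-cases constant
 * byte-multiple sizes with memcmp().
 */
static bool bitmap_equal_sketch(const unsigned long *src1,
				const unsigned long *src2, unsigned int nbits)
{
	unsigned int i, words = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;

	if (nbits <= BITS_PER_LONG)
		return !((src1[0] ^ src2[0]) & LAST_WORD_MASK(nbits));

	for (i = 0; i < words - 1; i++)
		if (src1[i] != src2[i])
			return false;
	return !((src1[words - 1] ^ src2[words - 1]) & LAST_WORD_MASK(nbits));
}
```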
/Linux-v6.1/arch/arc/kernel/ |
D | disasm.c | 113 state->src1 = state->words[1]; in disasm_instr() 115 state->src1 = get_reg(state->wb_reg, regs, cregs); in disasm_instr() 129 state->src1 = FIELD_C(state->words[0]); in disasm_instr() 130 if (state->src1 == REG_LIMM) { in disasm_instr() 132 state->src1 = state->words[1]; in disasm_instr() 134 state->src1 = get_reg(state->src1, regs, cregs); in disasm_instr() 219 state->src1 = state->words[1]; in disasm_instr() 221 state->src1 = get_reg(state->wb_reg, regs, in disasm_instr() 285 state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs); in disasm_instr() 328 state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs); in disasm_instr() [all …]
|
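In the disasm.c hits, state->src1 is filled either from the register file (get_reg()) or, when the encoded operand names the "long immediate" pseudo-register (REG_LIMM), from the 32-bit literal that follows the opcode (state->words[1]). The sketch below shows that decode step; the field extraction, register number and all names are invented for illustration and are not the ARC encoding.

```c
#include <stdbool.h>
#include <stdint.h>

#define REG_LIMM_SKETCH	62	/* hypothetical long-immediate register number */

struct decode_state_sketch {
	uint32_t words[2];	/* words[0] = opcode, words[1] = trailing limm */
	uint32_t src1;
	bool instr_is_long;	/* operand consumed the extra instruction word */
};

/* Stand-in for get_reg(): read a value from a plain register array. */
static uint32_t read_reg_sketch(unsigned int regno, const uint32_t *regs)
{
	return regs[regno];
}

static void fetch_src1_sketch(struct decode_state_sketch *state,
			      const uint32_t *regs)
{
	unsigned int field = state->words[0] & 0x3f;	/* hypothetical field */

	if (field == REG_LIMM_SKETCH) {
		/* Operand is the literal word following the opcode. */
		state->src1 = state->words[1];
		state->instr_is_long = true;
	} else {
		state->src1 = read_reg_sketch(field, regs);
	}
}
```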
D | unaligned.c | 139 set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs); in fixup_load() 146 get32_unaligned_check(val, state->src1 + state->src2); in fixup_load() 148 get16_unaligned_check(val, state->src1 + state->src2); in fixup_load() 185 put32_unaligned_check(state->src1, state->src2 + state->src3); in fixup_store() 187 put16_unaligned_check(state->src1, state->src2 + state->src3); in fixup_store()
|
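unaligned.c recomputes the faulting access's effective address (src1 + src2 for loads, src2 + src3 for stores) and redoes the access with byte-sized operations via get32/get16_unaligned_check() and the put variants. A portable sketch of the byte-assembly part is shown below, assuming little-endian byte order and omitting the per-byte fault handling the kernel helpers add.

```c
#include <stdint.h>

/*
 * Sketch of byte-wise unaligned access emulation: read or write a 32-bit
 * value one byte at a time so no aligned access is required.
 * Little-endian order is assumed; fault handling is omitted.
 */
static uint32_t get32_unaligned_sketch(const uint8_t *addr)
{
	return (uint32_t)addr[0] |
	       ((uint32_t)addr[1] << 8) |
	       ((uint32_t)addr[2] << 16) |
	       ((uint32_t)addr[3] << 24);
}

static void put32_unaligned_sketch(uint8_t *addr, uint32_t val)
{
	addr[0] = val & 0xff;
	addr[1] = (val >> 8) & 0xff;
	addr[2] = (val >> 16) & 0xff;
	addr[3] = (val >> 24) & 0xff;
}
```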
/Linux-v6.1/arch/m68k/math-emu/ |
D | multi_arith.h | 114 static inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1, in fp_submant() argument 119 : "g,d" (src2->lowmant), "0,0" (src1->lowmant)); in fp_submant() 121 : "d" (src2->mant.m32[1]), "0" (src1->mant.m32[1])); in fp_submant() 123 : "d" (src2->mant.m32[0]), "0" (src1->mant.m32[0])); in fp_submant() 126 #define fp_mul64(desth, destl, src1, src2) ({ \ argument 128 : "dm" (src1), "0" (src2)); \ 133 #define fp_add64(dest1, dest2, src1, src2) ({ \ argument 137 : "d" (src1), "0" (dest1)); \ 165 static inline void fp_multiplymant(union fp_mant128 *dest, struct fp_ext *src1, in fp_multiplymant() argument 170 fp_mul64(dest->m32[0], dest->m32[1], src1->mant.m32[0], src2->mant.m32[0]); in fp_multiplymant() [all …]
|
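multi_arith.h builds its wide mantissa arithmetic out of two primitives: fp_mul64(), a 32x32 -> 64 multiply split into high/low words (m68k mulu.l), and fp_add64(), a 64-bit add carried across two 32-bit halves (addx). The same primitives in portable C, as a sketch with assumed pointer-based signatures rather than the macro-and-asm originals:

```c
#include <stdint.h>

/* 32x32 -> 64 multiply, split into high and low 32-bit words. */
static void mul64_sketch(uint32_t *hi, uint32_t *lo, uint32_t a, uint32_t b)
{
	uint64_t prod = (uint64_t)a * b;

	*hi = (uint32_t)(prod >> 32);
	*lo = (uint32_t)prod;
}

/* 64-bit add expressed on two 32-bit halves, propagating the carry. */
static void add64_sketch(uint32_t *hi, uint32_t *lo, uint32_t add_hi,
			 uint32_t add_lo)
{
	uint32_t old_lo = *lo;

	*lo += add_lo;
	*hi += add_hi + (*lo < old_lo);	/* carry out of the low word */
}
```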
/Linux-v6.1/drivers/comedi/drivers/ni_routing/tools/ |
D | convert_c_to_py.c | 30 * <destination0>:[src0, src1, ...], 31 * <destination0>:[src0, src1, ...], 40 " # dest -> {src0:val0, src1:val1, ...}\n" in family_write() 93 * <destination0>:[src0, src1, ...], 94 * <destination0>:[src0, src1, ...], 103 " # dest -> [src0, src1, ...]\n" in device_write()
|
/Linux-v6.1/arch/sparc/kernel/ |
D | visemul.c | 55 /* 000101000 - four 16-bit compare; set rd if src1 > src2 */ 58 /* 000101100 - two 32-bit compare; set rd if src1 > src2 */ 61 /* 000100000 - four 16-bit compare; set rd if src1 <= src2 */ 64 /* 000100100 - two 32-bit compare; set rd if src1 <= src2 */ 67 /* 000100010 - four 16-bit compare; set rd if src1 != src2 */ 70 /* 000100110 - two 32-bit compare; set rd if src1 != src2 */ 73 /* 000101010 - four 16-bit compare; set rd if src1 == src2 */ 76 /* 000101110 - two 32-bit compare; set rd if src1 == src2 */ 603 u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; in pmul() local 605 u32 prod = src1 * src2; in pmul() [all …]
|
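The visemul.c comments describe the VIS partial compares: four 16-bit (or two 32-bit) lanes are compared and one result bit per lane is written to rd, while pmul() scales 8-bit lanes of rs1 by a 16-bit multiplier (prod = src1 * src2). The sketch below shows the four-lane "set rd if src1 > src2" case; lane ordering and signedness are assumptions for illustration, not the exact VIS definition.

```c
#include <stdint.h>

/*
 * Sketch of a four-lane 16-bit "greater than" partial compare: each 16-bit
 * lane of rs1/rs2 contributes one bit of the result.
 */
static uint64_t fcmpgt16_sketch(uint64_t rs1, uint64_t rs2)
{
	uint64_t rd = 0;
	int lane;

	for (lane = 0; lane < 4; lane++) {
		int16_t a = (int16_t)(rs1 >> (lane * 16));
		int16_t b = (int16_t)(rs2 >> (lane * 16));

		if (a > b)
			rd |= 1ULL << lane;
	}
	return rd;
}
```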
/Linux-v6.1/arch/x86/include/asm/ |
D | mpspec.h | 95 #define physids_and(dst, src1, src2) \ argument 96 bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_LOCAL_APIC) 98 #define physids_or(dst, src1, src2) \ argument 99 bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_LOCAL_APIC)
|
/Linux-v6.1/arch/sparc/lib/ |
D | xor.S | 404 ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ 415 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ 416 ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src1 + 0x10 */ 427 ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ 436 ldda [%i1 + 0x30] %asi, %i4 /* %i4/%i5 = src1 + 0x30 */ 466 ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ 479 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ 485 ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ 498 ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ 511 ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ [all …]
|
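xor_niagara_3() takes a byte count, a destination and two source buffers and folds them together with XOR, streaming paired 16-byte loads from src1 at increasing offsets as the listing shows. Reduced to plain C, and assuming (as with the kernel's RAID xor_blocks() helpers) that the destination is both read and written, the operation is simply the sketch below.

```c
#include <stddef.h>

/*
 * Sketch of a 3-operand XOR: dst ^= src1 ^ src2 over @bytes bytes.
 * Assumes word-aligned buffers and a word-multiple length; the real
 * routine works in 64-byte blocks with block-initializing stores.
 */
static void xor_3_sketch(size_t bytes, unsigned long *dst,
			 const unsigned long *src1, const unsigned long *src2)
{
	size_t i, words = bytes / sizeof(unsigned long);

	for (i = 0; i < words; i++)
		dst[i] ^= src1[i] ^ src2[i];
}
```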
/Linux-v6.1/drivers/media/platform/st/sti/bdisp/ |
D | bdisp-reg.h | 168 #define BLT_INS_S1_OFF 0x00000000 /* src1 disabled */ 169 #define BLT_INS_S1_MEM 0x00000001 /* src1 fetched from memory */ 170 #define BLT_INS_S1_CF 0x00000003 /* src1 color fill */ 171 #define BLT_INS_S1_COPY 0x00000004 /* src1 direct copy */ 172 #define BLT_INS_S1_FILL 0x00000007 /* src1 firect fill */
|
D | bdisp-debug.c | 43 seq_puts(s, "SRC1=mem - "); in bdisp_dbg_dump_ins() 46 seq_puts(s, "SRC1=ColorFill - "); in bdisp_dbg_dump_ins() 49 seq_puts(s, "SRC1=copy - "); in bdisp_dbg_dump_ins() 52 seq_puts(s, "SRC1=fil - "); in bdisp_dbg_dump_ins() 55 seq_puts(s, "SRC1=??? - "); in bdisp_dbg_dump_ins()
|
/Linux-v6.1/include/crypto/ |
D | algapi.h | 150 void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size); 171 static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, in crypto_xor_cpy() argument 178 unsigned long *s1 = (unsigned long *)src1; in crypto_xor_cpy() 188 __crypto_xor(dst, src1, src2, size); in crypto_xor_cpy()
|
/Linux-v6.1/arch/mips/alchemy/common/ |
D | dbdma.c | 394 u32 cmd0, cmd1, src1, dest1; in au1xxx_dbdma_ring_alloc() local 445 cmd0 = cmd1 = src1 = dest1 = 0; in au1xxx_dbdma_ring_alloc() 501 src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1); in au1xxx_dbdma_ring_alloc() 504 src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2); in au1xxx_dbdma_ring_alloc() 507 src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4); in au1xxx_dbdma_ring_alloc() 511 src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8); in au1xxx_dbdma_ring_alloc() 518 src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST); in au1xxx_dbdma_ring_alloc() 520 src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC); in au1xxx_dbdma_ring_alloc() 561 src1, dest0, dest1); in au1xxx_dbdma_ring_alloc() 567 dp->dscr_source1 = src1; in au1xxx_dbdma_ring_alloc() [all …]
|
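In dbdma.c, the src1 word of each DMA descriptor is assembled by OR-ing in a source transfer-size field (DSCR_SRC1_STS) and a source address mode (DSCR_SRC1_SAM, burst vs. static) before being stored into dp->dscr_source1. The sketch below shows that field-packing step; the shifts, widths and names are invented for illustration and are not the real DSCR_SRC1_* layout.

```c
#include <stdint.h>

/* Hypothetical field positions; the real ones live in au1xxx_dbdma.h. */
#define SRC1_TS_SHIFT	22
#define SRC1_TS(x)	(((uint32_t)(x) & 0x3) << SRC1_TS_SHIFT)
#define SRC1_SAM_SHIFT	20
#define SRC1_SAM(x)	(((uint32_t)(x) & 0x3) << SRC1_SAM_SHIFT)

/* Accumulate the source transfer-size and address-mode fields into src1. */
static uint32_t build_src1_sketch(unsigned int ts_code, unsigned int sam_code)
{
	uint32_t src1 = 0;

	src1 |= SRC1_TS(ts_code);	/* device transfer size */
	src1 |= SRC1_SAM(sam_code);	/* source address mode: burst/static */
	return src1;
}
```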