// SPDX-License-Identifier: GPL-2.0
/*
 * arch/x86_64/lib/csum-partial.c
 *
 * This file contains network checksum routines that are better done
 * in an architecture-specific manner due to speed.
 */

#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>
static inline unsigned short from32to16(unsigned a)
{
	unsigned short b = a >> 16;
	asm("addw %w2,%w0\n\t"
	    "adcw $0,%w0\n"
	    : "=r" (b)
	    : "0" (b), "r" (a));
	return b;
}
/*
 * Do a checksum on an arbitrary memory area.
 * Returns a 32bit checksum.
 *
 * it's best to have buff aligned on a 64-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	u64 temp64 = (__force u64)sum;
	unsigned odd, result;

	odd = 1 & (unsigned long) buff;
	if (unlikely(odd)) {
		if (unlikely(len == 0))
			return sum;
		temp64 = ror32((__force u32)sum, 8);
		temp64 += (*(unsigned char *)buff << 8);
		len--;
		buff++;
	}
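	/*
	 * Editorial note: starting one byte into the stream swaps the
	 * byte lanes of every 16-bit word, so ror32() above pre-rotates
	 * the incoming sum into the swapped lanes and the final result
	 * is byte-swapped back at the end of the function.
	 */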
51 asm("addq 0*8(%[src]),%[res]\n\t" in csum_partial()
52 "adcq 1*8(%[src]),%[res]\n\t" in csum_partial()
53 "adcq 2*8(%[src]),%[res]\n\t" in csum_partial()
54 "adcq 3*8(%[src]),%[res]\n\t" in csum_partial()
55 "adcq 4*8(%[src]),%[res]\n\t" in csum_partial()
56 "adcq 5*8(%[src]),%[res]\n\t" in csum_partial()
57 "adcq 6*8(%[src]),%[res]\n\t" in csum_partial()
58 "adcq 7*8(%[src]),%[res]\n\t" in csum_partial()
64 len -= 64; in csum_partial()
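	/*
	 * Editorial note: in the loop above, each adcq folds the carry
	 * of the previous 8-byte add into the next one, so a single
	 * carry flag threads through all 64 bytes, and the trailing
	 * "adcq $0" folds the last carry back into the running sum.
	 */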
68 asm("addq 0*8(%[src]),%[res]\n\t" in csum_partial()
69 "adcq 1*8(%[src]),%[res]\n\t" in csum_partial()
70 "adcq 2*8(%[src]),%[res]\n\t" in csum_partial()
71 "adcq 3*8(%[src]),%[res]\n\t" in csum_partial()
79 asm("addq 0*8(%[src]),%[res]\n\t" in csum_partial()
80 "adcq 1*8(%[src]),%[res]\n\t" in csum_partial()
	if (len & 8) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r" (temp64)
		    : [src] "r" (buff)
		    : "memory");
		buff += 8;
	}
	if (len & 7) {
		unsigned int shift = (8 - (len & 7)) * 8;
		unsigned long trail;

		trail = (load_unaligned_zeropad(buff) << shift) >> shift;

		asm("addq %[trail],%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r" (temp64)
		    : [trail] "r" (trail));
	}
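	/*
	 * Editorial note: the shift pair above clears everything except
	 * the low (len & 7) bytes of the word fetched by
	 * load_unaligned_zeropad() (little-endian), so bytes past the
	 * end of the buffer never enter the sum.
	 */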
	result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
	if (unlikely(odd)) {
		result = from32to16(result);
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
	}
	return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);
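For readers who want to sanity-check the algorithm outside the kernel, here is a minimal userspace sketch. It is entirely illustrative: rfc1071_sum() and fold16() are names invented here, not kernel APIs. It accumulates the same one's-complement sum over 16-bit little-endian words that csum_partial() computes, then folds and complements it the way csum_fold() would.

#include <stdint.h>
#include <stdio.h>

/* One's-complement sum of a byte buffer, 16 bits at a time, in
 * little-endian (x86) lane order. */
static uint32_t rfc1071_sum(const uint8_t *buf, size_t len, uint32_t sum)
{
	uint64_t acc = sum;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		acc += (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8);
	if (len & 1)			/* odd tail byte, zero-padded */
		acc += buf[len - 1];

	while (acc >> 32)		/* fold carries back in */
		acc = (acc & 0xffffffff) + (acc >> 32);
	return (uint32_t)acc;
}

/* Fold a 32-bit partial sum to 16 bits and complement it. */
static uint16_t fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	const uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x54, 0x1c, 0x46,
				0x40, 0x00, 0x40, 0x01, 0xb1 };
	uint32_t partial = rfc1071_sum(pkt, sizeof(pkt), 0);

	printf("partial=0x%08x checksum=0x%04x\n",
	       (unsigned)partial, (unsigned)fold16(partial));
	return 0;
}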