Lines matching "64-bit" in include/linux/math64.h (non-matching lines elided; gaps marked "...")
/* SPDX-License-Identifier: GPL-2.0 */
#if BITS_PER_LONG == 64
/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
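/*
 * Editor's sketch (not part of math64.h; assumes <linux/math64.h> is
 * included): split a nanosecond count into whole seconds plus leftover
 * nanoseconds. All example_* names below are hypothetical.
 */
static inline u64 example_ns_to_secs(u64 ns, u32 *rem_ns)
{
	return div_u64_rem(ns, 1000000000U, rem_ns);	/* quotient in seconds */
}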
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 */
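/*
 * Editor's sketch (hypothetical helper): the signed variant truncates the
 * quotient toward zero and gives the remainder the dividend's sign.
 */
static inline s64 example_div_s64_rem(s32 *r)
{
	return div_s64_rem(-7, 2, r);	/* returns -3, *r == -1 */
}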
/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 */
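/*
 * Editor's sketch (hypothetical helper): divisor and remainder are both
 * full 64-bit, e.g. splitting a byte count into variable-sized chunks
 * whose size may exceed 32 bits. Caller ensures chunk != 0.
 */
static inline u64 example_chunks(u64 bytes, u64 chunk, u64 *left)
{
	return div64_u64_rem(bytes, chunk, left);
}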
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 */
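/*
 * Editor's sketch (hypothetical helper): plain 64/64 divide, e.g. an
 * average period from totals that may both exceed 32 bits.
 */
static inline u64 example_avg_period(u64 total_ns, u64 events)
{
	return events ? div64_u64(total_ns, events) : 0;	/* guard /0 */
}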
/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 */
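/* Editor's sketch (hypothetical helper): signed 64/64, truncating toward zero. */
static inline s64 example_div64_s64(void)
{
	return div64_s64(-10, 3);	/* == -3 */
}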
/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
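/*
 * Editor's sketch (hypothetical helper): the workhorse variant; on 32-bit
 * architectures an open-coded "u64 / u32" would instead drag in the slow
 * libgcc 64/64 division path.
 */
static inline u64 example_ns_to_ms(u64 ns)
{
	return div_u64(ns, 1000000);
}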
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
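/* Editor's sketch (hypothetical helper): signed divide, truncating toward zero. */
static inline s64 example_us_to_ms(s64 delta_us)
{
	return div_s64(delta_us, 1000);	/* -1500us -> -1ms */
}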
/* Comment on mul_u32_u32(): */
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
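/*
 * Editor's sketch of the pattern that comment is about: the cast must
 * widen an operand *before* the multiply, otherwise the product is
 * computed (and truncated) in 32 bits.
 */
static inline u64 example_widening_mul(u32 a, u32 b)
{
	/* not (u64)(a * b), which truncates the product to 32 bits first */
	return (u64)a * b;
}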
/* In mul_u64_u32_shr(): */
	ret += mul_u32_u32(ah, mul) << (32 - shift);
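/*
 * Editor's sketch (hypothetical values): mul_u64_u32_shr() computes
 * (a * mul) >> shift without losing the up-to-96-bit intermediate
 * product -- the classic cycles-to-nanoseconds scaling shape.
 */
static inline u64 example_cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return mul_u64_u32_shr(cycles, mult, shift);
}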
/* In mul_u64_u64_shr(): */
	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	/* ... */
	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	/* ... */
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
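/*
 * Editor's sketch (hypothetical helper): mul_u64_u64_shr() takes the full
 * 128-bit product of two u64 values, then shifts it right -- usable as a
 * 64-bit fixed-point multiply when ratio_fp carries fp_bits fraction bits.
 */
static inline u64 example_fixed_point_scale(u64 val, u64 ratio_fp,
					    unsigned int fp_bits)
{
	return mul_u64_u64_shr(val, ratio_fp, fp_bits);
}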
/* In mul_u64_u32_div(): */
	/* Bits 32-63 of the result will be in rh.l.low. */
	/* ... */
	/* Bits 0-31 of the result will be in rl.l.low. */
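/*
 * Editor's sketch (hypothetical helper): mul_u64_u32_div() computes
 * val * num / den in one step, so the intermediate "val * num" may exceed
 * 64 bits without overflowing. Caller ensures den != 0.
 */
static inline u64 example_rescale(u64 val, u32 num, u32 den)
{
	return mul_u64_u32_div(val, num, den);
}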
#define DIV64_U64_ROUND_UP(ll, d)	\
	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
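/*
 * Editor's sketch (hypothetical helper): round-up division, e.g. how many
 * whole sectors a byte count occupies.
 */
static inline u64 example_sectors(u64 bytes, u64 sector_size)
{
	return DIV64_U64_ROUND_UP(bytes, sector_size);	/* 1 byte -> 1 sector */
}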
/**
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round to closest integer.
 */
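/*
 * Editor's sketch (hypothetical helper): adding divisor/2 before dividing
 * rounds to nearest, so DIV64_U64_ROUND_CLOSEST(7, 2) == 4 and
 * DIV64_U64_ROUND_CLOSEST(5, 2) == 3. Caller avoids part * 100 overflow.
 */
static inline u64 example_percent(u64 part, u64 whole)
{
	return DIV64_U64_ROUND_CLOSEST(part * 100, whole);
}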
/**
 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Divide signed 64bit dividend by signed 32bit divisor
 * and round to closest integer.
 */
/* In DIV_S64_ROUND_CLOSEST(), the branch taken when dividend and divisor differ in sign: */
		div_s64((__x - (__d / 2)), __d);	\
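/*
 * Editor's sketch: the macro's sign test picks +d/2 or -d/2 so the
 * rounding is symmetric around zero.
 */
static inline s64 example_signed_round(void)
{
	return DIV_S64_ROUND_CLOSEST(-7, 2);	/* == -4, mirroring 7/2 == 4 */
}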