Lines matching "64-bit" in include/linux/math64.h
/* SPDX-License-Identifier: GPL-2.0 */
#if BITS_PER_LONG == 64
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 * This is commonly provided by 32bit archs as an optimized 64bit divide.
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
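
A minimal usage sketch for the *_rem helpers (the function name is invented; NSEC_PER_SEC comes from <linux/time64.h>): div_u64_rem() splits a 64bit nanosecond count into whole seconds plus leftover nanoseconds in a single division.

#include <linux/math64.h>
#include <linux/time64.h>

/* Illustrative only: split a ns timestamp into seconds + leftover ns. */
static void ns_to_parts(u64 ns, u64 *secs, u32 *rem_ns)
{
        *secs = div_u64_rem(ns, NSEC_PER_SEC, rem_ns);
}
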
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
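
A hedged sketch of the 64bit-divisor variant (names invented for illustration):

#include <linux/math64.h>

/* How many whole chunks fit into total, and how much is left over. */
static u64 split_into_chunks(u64 total, u64 chunk, u64 *leftover)
{
        return div64_u64_rem(total, chunk, leftover);
}
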
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
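
A short sketch covering div64_u64() (the rate helper is hypothetical; as with plain C division, a zero divisor must be guarded by the caller):

#include <linux/math64.h>

/* Bytes transferred per elapsed nanosecond, both counters 64bit. */
static u64 bytes_per_ns(u64 bytes, u64 elapsed_ns)
{
        if (!elapsed_ns)        /* caller-side divide-by-zero guard */
                return 0;
        return div64_u64(bytes, elapsed_ns);
}
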
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit divide.
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
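
A sketch of the constant-32bit-divisor case this variant is optimized for (ns_to_ms() is an illustrative name; NSEC_PER_MSEC comes from <linux/time64.h>):

#include <linux/math64.h>
#include <linux/time64.h>

static u64 ns_to_ms(u64 ns)
{
        return div_u64(ns, NSEC_PER_MSEC);
}
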
 * Many a GCC version messes this up and generates a 64x64 mult :-(
        ret += mul_u32_u32(ah, mul) << (32 - shift);   in mul_u64_u32_shr()
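
mul_u64_u32_shr() computes (a * mul) >> shift, keeping the up-to-96-bit intermediate product. A hedged sketch in the style of clocksource cycles-to-ns scaling (the mult/shift pair is assumed precomputed; the function name is made up):

#include <linux/math64.h>

/* result = (cycles * mult) >> shift, without overflowing 64 bits */
static u64 cycles_to_ns_sketch(u64 cycles, u32 mult, u32 shift)
{
        return mul_u64_u32_shr(cycles, mult, shift);
}
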
 * Each of these lines computes a 64-bit intermediate result into "c",   in mul_u64_u64_shr()
 * starting at bits 32-95. The low 32 bits go into the result of the   in mul_u64_u64_shr()
 * multiplication; the high 32 bits are carried into the next step.   in mul_u64_u64_shr()
 * The 128-bit result of the multiplication is in rl.ll and rh.ll;   in mul_u64_u64_shr()
 * shift it right and throw away the high part of the result.   in mul_u64_u64_shr()
        if (shift < 64)   in mul_u64_u64_shr()
                return (rl.ll >> shift) | (rh.ll << (64 - shift));   in mul_u64_u64_shr()
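
Because the full 128-bit product is kept, a shift of exactly 64 returns the high half. A minimal sketch treating the multiplier as a 0.64 fixed-point fraction (an assumption made for illustration):

#include <linux/math64.h>

/* a scaled by frac/2^64, i.e. the high 64 bits of the 128-bit product */
static u64 mul_frac64(u64 a, u64 frac)
{
        return mul_u64_u64_shr(a, frac, 64);
}
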
        ret = -((s64) ret);   in mul_s64_u64_shr()
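
The negation above reapplies the sign after the magnitude has gone through the unsigned multiply. Usage mirrors the unsigned variant; a hypothetical sketch:

#include <linux/math64.h>

/* Scale a signed clock offset by an unsigned fixed-point ratio. */
static s64 scale_offset(s64 offset, u64 ratio, unsigned int shift)
{
        return mul_s64_u64_shr(offset, ratio, shift);
}
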
        /* Bits 32-63 of the result will be in rh.l.low. */   in mul_u64_u32_div()
        /* Bits 0-31 of the result will be in rl.l.low. */   in mul_u64_u32_div()
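
These two comments track where mul_u64_u32_div() assembles a * mul / divisor from 32bit pieces. A hedged rescaling sketch (names invented):

#include <linux/math64.h>

/* Rescale val by num/den without overflowing the multiply. */
static u64 rescale(u64 val, u32 num, u32 den)
{
        return mul_u64_u32_div(val, num, den);
}
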
        ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
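
This statement expression is the body of DIV64_U64_ROUND_UP(ll, d): _tmp captures d once, so a divisor expression with side effects is not evaluated twice, and adding _tmp - 1 makes the truncating divide round up. A small sketch (4096 stands in for a real block size):

#include <linux/math64.h>

/* Whole 4 KiB blocks needed to cover len bytes. */
static u64 blocks_for_len(u64 len)
{
        return DIV64_U64_ROUND_UP(len, 4096ULL);
}
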
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * Divide unsigned 64bit dividend by unsigned 64bit divisor and round to the closest integer.
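
A hedged sketch of nearest-integer rounding (the percentage helper is illustrative and assumes part * 100 does not overflow 64 bits):

#include <linux/math64.h>

static u64 percent_of(u64 part, u64 whole)
{
        return DIV64_U64_ROUND_CLOSEST(part * 100, whole);
}
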
 * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * Divide unsigned 64bit dividend by unsigned 32bit divisor and round to the closest integer.
 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * Divide signed 64bit dividend by signed 32bit divisor and round to the closest integer.
                div_s64((__x - (__d / 2)), __d); \
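
The subtracting branch above runs when dividend and divisor have opposite signs; the matching-sign branch adds __d / 2 instead, so ties round away from zero either way. Two worked values as a sketch:

#include <linux/math64.h>

static void closest_examples(void)
{
        s64 a = DIV_S64_ROUND_CLOSEST(7, 2);    /*  (7 + 1) / 2 ==  4 */
        s64 b = DIV_S64_ROUND_CLOSEST(-7, 2);   /* (-7 - 1) / 2 == -4 */

        (void)a;
        (void)b;
}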