/picolibc-latest/newlib/libc/include/sys/

tree.h
     92  #define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \  argument
     93  SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
     94  SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
     95  (head)->sph_root = tmp; \
     98  #define SPLAY_ROTATE_LEFT(head, tmp, field) do { \  argument
     99  SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
    100  SPLAY_LEFT(tmp, field) = (head)->sph_root; \
    101  (head)->sph_root = tmp; \
    104  #define SPLAY_LINKLEFT(head, tmp, field) do { \  argument
    105  SPLAY_LEFT(tmp, field) = (head)->sph_root; \
    [all …]
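
Note: the tree.h hits come from the BSD <sys/tree.h> splay-tree macros, where tmp names the child node that gets rotated up to the root. A minimal sketch of the same right rotation written as a plain C function (the real macros are parameterized over the link field name, so this is illustrative, not the header's code):

    struct node {
        struct node *left, *right;
        int key;
    };

    /* Sketch of SPLAY_ROTATE_RIGHT: "tmp" (the root's left child) becomes
     * the new root and the old root becomes its right child, exactly the
     * three assignments shown at lines 93-95 above. */
    static void
    rotate_right(struct node **root)
    {
        struct node *tmp = (*root)->left;
        (*root)->left = tmp->right;   /* SPLAY_LEFT(root) = SPLAY_RIGHT(tmp) */
        tmp->right = *root;           /* SPLAY_RIGHT(tmp) = root */
        *root = tmp;                  /* root = tmp */
    }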

/picolibc-latest/newlib/libc/machine/aarch64/

strcmp.S
     37  #define tmp x6  macro
     63  and tmp, src1, 7
     66  cbnz tmp, L(mutual_align)
     75  rev tmp, data1
     76  sub has_nul, tmp, zeroones
     77  orr tmp, tmp, REP8_7f
     80  orr tmp, data1, REP8_7f
     82  bics has_nul, has_nul, tmp /* Non-zero if NUL terminator. */
    119  mov tmp, -1
    120  LS_FW tmp, tmp, shift
    [all …]
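
Note: in strcmp.S the sub/orr/bics sequence on tmp (lines 76-82) is the classic word-at-a-time NUL detector: subtract 0x01 from every byte, AND with the complement of the data, and keep only the 0x80 bits. A C sketch of the same test (not the picolibc assembly itself):

    #include <stdint.h>

    /* Returns nonzero iff some byte of x is zero.  This is the zeroones /
     * REP8_7f idiom used above: (x - 0x0101..01) & ~x & 0x8080..80 sets a
     * bit only in byte positions where x holds a zero byte. */
    static inline uint64_t
    has_zero_byte(uint64_t x)
    {
        const uint64_t zeroones = 0x0101010101010101ULL;
        const uint64_t highbits = 0x8080808080808080ULL;
        return (x - zeroones) & ~x & highbits;
    }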

memchr.S
     29  #define tmp x4  macro
     80  sub tmp, soff, #32
     81  adds cntin, cntin, tmp
     90  lsl tmp, soff, #1
     91  lsr synd, synd, tmp
     92  lsl synd, synd, tmp
    124  add tmp, cntrem, soff
    125  and tmp, tmp, #31
    126  sub tmp, tmp, #32
    127  neg tmp, tmp, lsl #1
    [all …]

memcmp.S
     33  #define tmp x6  macro
    143  and tmp, src2, 15
    144  sub tmp, tmp, 16
    145  sub src2, src2, tmp
    146  add limit, limit, tmp
    147  sub src1, src1, tmp
    169  fmov tmp, d0
    170  ccmp tmp, 0, 0, hi
    175  cbz tmp, L(last64)
    179  rev16 tmp, tmp
    [all …]

strcpy.S
     51  #define tmp x5  macro
    102  sub tmp, src, srcin
    104  add len, tmp, len, lsr 2
    106  sub tmp, len, 15
    108  ldr dataq2, [srcin, tmp]
    110  str dataq2, [dstin, tmp]
    120  sub tmp, len, 7
    122  ldr data2, [srcin, tmp]
    124  str data2, [dstin, tmp]
    130  subs tmp, len, 3
    [all …]

memrchr.S
     30  #define tmp x7  macro
    102  add tmp, src, 15
    107  sub tmp, tmp, synd, lsr 2
    108  cmp tmp, srcin
    109  csel result, tmp, xzr, hs

strnlen.S
     50  #define tmp x4  macro
     90  sub tmp, src, srcin
     91  add tmp, tmp, 17
     92  subs cntrem, cntin, tmp

/picolibc-latest/newlib/libm/common/

exp2.c
     54  specialcase (double_t tmp, uint64_t sbits, uint64_t ki)  in specialcase() argument
     63  y = 2 * (scale + scale * tmp);  in specialcase()
     69  y = scale + scale * tmp;  in specialcase()
     77  lo = scale - y + scale * tmp;  in specialcase()
    104  double_t kd, r, r2, scale, tail, tmp;  in exp2() local
    147  tmp = tail + r * C1 + r2 * C2 + r * r2 * (C3 + r * C4);  in exp2()
    149  tmp = tail + r * C1 + r2 * (C2 + r * C3) + r2 * r2 * (C4 + r * C5);  in exp2()
    151  tmp = tail + r * C1 + r2 * (0.5 + r * C3) + r2 * r2 * (C4 + r * C5 + r2 * C6);  in exp2()
    154  return specialcase (tmp, sbits, ki);  in exp2()
    158  return scale + scale * tmp;  in exp2()

exp.c
     56  specialcase (double_t tmp, uint64_t sbits, uint64_t ki)  in specialcase() argument
     65  y = 0x1p1009 * (scale + scale * tmp);  in specialcase()
     71  y = scale + scale * tmp;  in specialcase()
     79  lo = scale - y + scale * tmp;  in specialcase()
    106  double_t kd, z, r, r2, scale, tail, tmp;  in exp() local
    160  tmp = tail + r + r2 * C2 + r * r2 * (C3 + r * C4);  in exp()
    162  tmp = tail + r + r2 * (C2 + r * C3) + r2 * r2 * (C4 + r * C5);  in exp()
    164  tmp = tail + r + r2 * (0.5 + r * C3) + r2 * r2 * (C4 + r * C5 + r2 * C6);  in exp()
    167  return specialcase (tmp, sbits, ki);  in exp()
    171  return scale + scale * tmp;  in exp()
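
Note: in both exp() and exp2(), tmp holds the polynomial part of e^r - 1 (or 2^r - 1) after the argument has been reduced to x = k/N + r; the result is assembled as scale + scale * tmp, and specialcase() takes over when the scale factor would overflow or underflow. A toy sketch of that reconstruction, with N = 1, a Taylor polynomial in place of the tuned C1..C6 coefficients, and no special-case handling (an illustration of the structure, not the picolibc algorithm):

    #include <math.h>

    static double
    toy_exp2(double x)
    {
        double kd = nearbyint(x);              /* N == 1 here; the real code uses a larger N   */
        double r  = x - kd;                    /* |r| <= 0.5                                   */
        double scale = ldexp(1.0, (int) kd);   /* 2^k; a table lookup in the real code.        */
                                               /* Assumes |x| small enough that 2^k exists;    */
                                               /* otherwise specialcase() would be needed.     */

        /* short polynomial for 2^r - 1 (Taylor coefficients of 2^r; the
         * real code uses minimax coefficients plus a table "tail" term) */
        double tmp = r * 6.931471805599453e-1
                   + r * r * 2.402265069591007e-1
                   + r * r * r * 5.550410866482158e-2;

        /* reconstruction step shown in the hits above */
        return scale + scale * tmp;
    }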

sf_log.c
     55  uint32_t ix, iz, tmp;  in logf() local
     81  tmp = ix - OFF;  in logf()
     82  i = (tmp >> (23 - LOGF_TABLE_BITS)) % N;  in logf()
     83  k = (int32_t) tmp >> 23; /* arithmetic shift */  in logf()
     84  iz = ix - (tmp & (uint32_t) 0x1ff << 23);  in logf()

sf_log2.c
     54  uint32_t ix, iz, top, tmp;  in log2f() local
     80  tmp = ix - OFF;  in log2f()
     81  i = (tmp >> (23 - LOG2F_TABLE_BITS)) % N;  in log2f()
     82  top = tmp & 0xff800000;  in log2f()
     84  k = (int32_t) tmp >> 23; /* arithmetic shift */  in log2f()
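
Note: logf() and log2f() (and the log_inline() helper in pow.c further down) peel the exponent and a table index straight out of the IEEE-754 bit pattern: tmp = ix - OFF re-biases the representation, the top mantissa bits select a table entry, and an arithmetic shift recovers the exponent k. A hedged sketch of that bit manipulation for the single-precision case; TABLE_BITS and OFF are illustrative values, not necessarily the picolibc configuration:

    #include <stdint.h>
    #include <string.h>

    #define TABLE_BITS 4
    #define N          (1 << TABLE_BITS)
    #define OFF        0x3f330000u            /* bit pattern near sqrt(2)/2 */

    /* Split x into x = z * 2^k with z close to 1, plus a table index i
     * derived from the top mantissa bits of the re-biased representation. */
    static void
    split_float(float x, int *k, unsigned *i, float *z)
    {
        uint32_t ix, iz, tmp;

        memcpy(&ix, &x, sizeof ix);           /* bit pattern of x */
        tmp = ix - OFF;
        *i = (tmp >> (23 - TABLE_BITS)) % N;  /* top mantissa bits -> table index */
        *k = (int32_t) tmp >> 23;             /* arithmetic shift -> exponent k */
        iz = ix - (tmp & 0xff800000u);        /* remove k from the exponent field */
        memcpy(z, &iz, sizeof iz);            /* z = x / 2^k */
    }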

s_round.c
     93  __uint32_t tmp;  in round64() local
     99  tmp = lsw + (1 << (51 - exponent_less_1023));  in round64()
    100  if (tmp < lsw)  in round64()
    102  lsw = tmp;  in round64()
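
Note: the round64() hits show the standard trick for detecting a carry when a rounding increment is added to the low 32-bit word of a double split into two words: unsigned addition wrapped around exactly when the sum compares below the original operand. A minimal sketch:

    #include <stdint.h>

    /* Add inc to the low word; if the sum wrapped (tmp < *lsw), propagate
     * a carry into the high word, as round64() does above. */
    static void
    add_with_carry(uint32_t *msw, uint32_t *lsw, uint32_t inc)
    {
        uint32_t tmp = *lsw + inc;
        if (tmp < *lsw)        /* unsigned overflow => carry out of the low word */
            *msw += 1;
        *lsw = tmp;
    }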

pow.c
     64  uint64_t iz, tmp;  in log_inline() local
     70  tmp = ix - OFF;  in log_inline()
     71  i = (tmp >> (52 - POW_LOG_TABLE_BITS)) % N;  in log_inline()
     72  k = (int64_t) tmp >> 52; /* arithmetic shift */  in log_inline()
     73  iz = ix - (tmp & 0xfffULL << 52);  in log_inline()
    151  specialcase (double_t tmp, uint64_t sbits, uint64_t ki)  in specialcase() argument
    160  y = 0x1p1009 * (scale + scale * tmp);  in specialcase()
    167  y = scale + scale * tmp;  in specialcase()
    181  lo = scale - y + scale * tmp;  in specialcase()
    205  double_t kd, z, r, r2, scale, tail, tmp;  in exp_inline() local
    [all …]

s_llround.c
     67  unsigned int tmp = lsw  in llround64() local
     70  if (tmp < lsw)  in llround64()
     75  | SAFE_RIGHT_SHIFT (tmp, (52 - exponent_less_1023));  in llround64()

s_lround.c
    105  __uint32_t tmp = lsw  in lround64() local
    109  if (tmp < lsw)  in lround64()
    116  | SAFE_RIGHT_SHIFT (tmp, (52 - exponent_less_1023));  in lround64()

/picolibc-latest/newlib/libc/stdlib/

l64a.c
     35  unsigned long tmp = (unsigned long)value & 0xffffffff;  in l64a() local
     43  if (tmp == 0)  in l64a()
     49  index = tmp & (64 - 1);  in l64a()
     51  tmp >>= 6;  in l64a()
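
Note: l64a() converts a long to its radix-64 ASCII form by peeling off six bits at a time, least-significant digit first, using the POSIX digit set '.', '/', '0'-'9', 'A'-'Z', 'a'-'z'. A minimal sketch of that loop with simplified buffer handling (the real function manages its own result buffer):

    static const char digits64[] =
        "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";

    /* buf must hold at least 7 bytes (6 digits plus NUL). */
    static void
    to_radix64(unsigned long value, char *buf)
    {
        unsigned long tmp = value & 0xffffffffUL;  /* at most 32 bits -> 6 digits */
        char *p = buf;

        while (tmp != 0) {
            *p++ = digits64[tmp & (64 - 1)];       /* low 6 bits select a digit */
            tmp >>= 6;
        }
        *p = '\0';                                 /* 0 encodes as "", as in l64a() */
    }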

wctomb_r.c
     61  wchar_t tmp;  in __utf8_wctomb() local
     62  tmp = ((uint32_t) state->__value.__wchb[0] << 16 | (uint32_t) state->__value.__wchb[1] << 8)  in __utf8_wctomb()
     64  *s++ = 0xe0 | ((tmp & 0xf000) >> 12);  in __utf8_wctomb()
     65  *s++ = 0x80 | ((tmp & 0xfc0) >> 6);  in __utf8_wctomb()
     66  *s++ = 0x80 | (tmp & 0x3f);  in __utf8_wctomb()
     86  uint32_t tmp;  in __utf8_wctomb() local
     91  tmp = ((wchar & 0x3ff) << 10) + 0x10000;  in __utf8_wctomb()
     92  state->__value.__wchb[0] = (tmp >> 16) & 0xff;  in __utf8_wctomb()
     93  state->__value.__wchb[1] = (tmp >> 8) & 0xff;  in __utf8_wctomb()
     95  *s = (0xf0 | ((tmp & 0x1c0000) >> 18));  in __utf8_wctomb()
    [all …]
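
Note: the __utf8_wctomb() hits are assembling UTF-8 sequences, including recombining a surrogate pair parked in the conversion state into a code point above U+FFFF. A sketch of the byte layout being produced, for a single already-combined code point (this is not the picolibc state machine, and surrogate-range validation is omitted):

    #include <stdint.h>

    /* Pack a code point into 1-4 UTF-8 bytes using the standard lead-byte
     * markers 0xc0, 0xe0 and 0xf0.  Returns the number of bytes written,
     * or -1 for a code point above U+10FFFF. */
    static int
    encode_utf8(uint32_t cp, unsigned char *s)
    {
        if (cp < 0x80) {
            s[0] = (unsigned char) cp;
            return 1;
        }
        if (cp < 0x800) {
            s[0] = 0xc0 | (cp >> 6);
            s[1] = 0x80 | (cp & 0x3f);
            return 2;
        }
        if (cp < 0x10000) {                    /* three-byte form, lines 64-66 above */
            s[0] = 0xe0 | (cp >> 12);
            s[1] = 0x80 | ((cp >> 6) & 0x3f);
            s[2] = 0x80 | (cp & 0x3f);
            return 3;
        }
        if (cp <= 0x10ffff) {                  /* four-byte form, line 95 above */
            s[0] = 0xf0 | (cp >> 18);
            s[1] = 0x80 | ((cp >> 12) & 0x3f);
            s[2] = 0x80 | ((cp >> 6) & 0x3f);
            s[3] = 0x80 | (cp & 0x3f);
            return 4;
        }
        return -1;
    }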

/picolibc-latest/newlib/libc/machine/spu/

stack_reg_va.S
     86  #define tmp $74  macro
    109  il tmp, -(SPE_STACK_REGS+2+3)*16
    110  a code_ptr, $sp, tmp
    111  lqr tmp, save_regs_1 /* store stack code */
    112  stqd tmp, 0(code_ptr)
    164  ceqbi tmp, inst, 3 /* if (reg-num == 3) tmp = 0x000000FF 000..0 */
    168  brz tmp, save_regs_1 /* if (tmp == 0) jump */

ea_internal.h
     76  size_ea_t tmp = (size_ea_t) x;  in round_down_128_ea() local
     77  tmp &= (~127);  in round_down_128_ea()
     78  return (__ea void*)tmp;  in round_down_128_ea()
     84  size_ea_t tmp = (size_ea_t) x;  in round_up_next_128_ea() local
     85  tmp += 128;  in round_up_next_128_ea()
     86  tmp &= (~127);  in round_up_next_128_ea()
     87  return (__ea void*)tmp;  in round_up_next_128_ea()
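
Note: round_down_128_ea() and round_up_next_128_ea() are plain mask-based helpers that align an effective address to a 128-byte boundary. A sketch of the same arithmetic, with uintptr_t standing in for the SPU-specific size_ea_t type:

    #include <stdint.h>

    /* Clearing the low seven bits rounds down to a 128-byte boundary. */
    static inline uintptr_t
    round_down_128(uintptr_t a)
    {
        return a & ~(uintptr_t) 127;
    }

    /* Adding 128 first rounds up to the *next* boundary: an already
     * aligned address moves up by a full 128 bytes, which matches the
     * "round_up_next" name in the header above. */
    static inline uintptr_t
    round_up_next_128(uintptr_t a)
    {
        return (a + 128) & ~(uintptr_t) 127;
    }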

/picolibc-latest/test/libc-testsuite/

basename.c
     31  tmp = strdup((p)); \
     32  s = basename(tmp); \
     38  free(tmp); \
     43  char *tmp, *s;  in test_basename() local

dirname.c
     31  tmp = strdup((p)); \
     32  s = dirname (tmp); \
     38  free(tmp); \
     43  char *tmp, *s;  in test_dirname() local
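
Note: the basename.c and dirname.c tests duplicate each input string before the call because POSIX allows basename() and dirname() to modify their argument in place; the copy is freed after the comparison. A sketch of that pattern as a helper function (the real tests use a macro, as the backslash continuations above suggest):

    #include <libgen.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Returns 1 if basename(input) equals expected, 0 otherwise. */
    static int
    check_basename(const char *input, const char *expected)
    {
        char *tmp = strdup(input);      /* basename() may write into this copy */
        char *s = basename(tmp);
        int ok = (s != NULL && strcmp(s, expected) == 0);

        if (!ok)
            printf("basename(\"%s\") returned \"%s\", expected \"%s\"\n",
                   input, s ? s : "(null)", expected);
        free(tmp);
        return ok;
    }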

/picolibc-latest/newlib/libm/math/

sf_jn.c
    107  float q0, q1, h, tmp;  in jnf() local
    118  tmp = z * q1 - q0;  in jnf()
    120  q1 = tmp;  in jnf()
    135  tmp = n;  in jnf()
    137  tmp = tmp * logf(fabsf(v * tmp));  in jnf()
    138  if (tmp < (float)8.8721679688e+01) {  in jnf()

s_jn.c
    170  __float64 q0, q1, h, tmp;  in jn64() local
    181  tmp = z * q1 - q0;  in jn64()
    183  q1 = tmp;  in jn64()
    198  tmp = n;  in jn64()
    200  tmp = tmp * log(fabs64(v * tmp));  in jn64()
    201  if (tmp < _F_64(7.09782712893383973096e+02)) {  in jn64()
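
Note: in jnf() and jn64(), the "tmp = z * q1 - q0" lines step a three-term recurrence, and the log-based test that follows estimates whether the result would overflow. Bessel functions of the first kind satisfy J(n+1, x) = (2n/x) * J(n, x) - J(n-1, x); a sketch of the forward form of that recurrence, which is only well conditioned for x > n (which is why implementations like this one also use a backward/continued-fraction scheme in the other regime):

    #include <math.h>

    /* Forward three-term recurrence starting from J0 and J1; illustrative
     * only, not the picolibc jn() algorithm. */
    static double
    jn_forward(int n, double x)
    {
        double q0 = j0(x);      /* J_0(x) */
        double q1 = j1(x);      /* J_1(x) */

        for (int k = 1; k < n; k++) {
            double tmp = (2.0 * k / x) * q1 - q0;
            q0 = q1;
            q1 = tmp;
        }
        return n == 0 ? q0 : q1;
    }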

/picolibc-latest/newlib/libc/machine/arm/

memchr.S
    102  #define tmp r3  macro
    149  ldrb tmp, [srcin], #1
    150  cmp tmp, chrin
    163  movw tmp, #0x0201
    164  movt tmp, #0x0804
    165  lsl soff, tmp, #4
    166  vmov vrepmask0, tmp, soff
    167  vmov vrepmask1, tmp, soff
    179  sub tmp, soff, #32
    180  adds cntin, cntin, tmp
    [all …]

/picolibc-latest/newlib/libc/string/

memmem.c
    144  size_t tmp, shift1;  in memmem() local
    162  tmp = shift[hash2 (hs)];  in memmem()
    164  while (hs <= end && tmp == 0);  in memmem()
    168  hs -= tmp;  in memmem()
    169  if (tmp < m1)  in memmem()
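
Note: the memmem() hits come from a shift-table search: shift[hash2(hs)] says how far the window can jump when the current alignment cannot match. As a simplified illustration of the shift-table idea, here is a plain Boyer-Moore-Horspool search with a 256-entry bad-character table; the real code hashes a pair of bytes into a much smaller table, so treat this as a sketch of the technique, not of that implementation:

    #include <stddef.h>
    #include <string.h>

    /* Find the first occurrence of ne[0..ne_len) in hs[0..hs_len), or
     * return NULL.  The shift table records, for each byte value, how far
     * the window may advance when that byte ends the current window. */
    static const char *
    horspool_search(const char *hs, size_t hs_len, const char *ne, size_t ne_len)
    {
        size_t shift[256];

        if (ne_len == 0)
            return hs;
        if (hs_len < ne_len)
            return NULL;

        for (size_t i = 0; i < 256; i++)
            shift[i] = ne_len;                 /* byte absent from the needle */
        for (size_t i = 0; i + 1 < ne_len; i++)
            shift[(unsigned char) ne[i]] = ne_len - 1 - i;

        for (size_t pos = 0; pos + ne_len <= hs_len; ) {
            if (memcmp(hs + pos, ne, ne_len) == 0)
                return hs + pos;
            pos += shift[(unsigned char) hs[pos + ne_len - 1]];
        }
        return NULL;
    }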