Lines matching references to p_scr
43 #define p_scr p6 // default register for same-cycle branches
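
p_scr is the file's scratch predicate. An IA-64 compare writes a pair of complementary one-bit predicates; naming p0 (hardwired to 1, writes discarded) as the second target keeps only the condition itself. Every match below follows the same shape: one cmp/tbit that sets p_scr, then exactly one instruction guarded by it. A minimal C sketch of that idiom (hypothetical name, not glibc code):

    /* Hedged sketch: the generic "cmp writes p_scr, one instruction is
     * guarded by it" pattern behind every line in this listing. On IA-64
     * the guarded instruction is nullified when p_scr is 0; there is no
     * extra branch unless the guarded instruction is itself a branch. */
    static long p_scr_idiom(long cnt)
    {
        int p_scr = (cnt == 0);   /* cmp.eq p_scr, p0 = cnt, r0        */
        if (p_scr)                /* (p_scr) br.ret.dpnt.many rp       */
            return 0;             /*   ...or any single guarded insn   */
        return cnt;
    }
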
69 cmp.eq p_scr, p0 = cnt, r0
78 (p_scr) br.ret.dpnt.many rp // return immediately if count = 0
84 cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
85 (p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U)
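
Lines 69/78 and 84/85 are the two early exits: return at once for a zero count, and divert counts below 16 to the byte mover. A hedged C sketch of this entry dispatch; the threshold 16 is from line 84, while the helper and the glue between the tests are assumptions:

    /* Hedged sketch (hypothetical helper, not the real routine). */
    static void move_bytes_unaligned(unsigned char *s, int c, unsigned long cnt)
    {
        while (cnt--)                      /* byte-at-a-time tail mover   */
            *s++ = (unsigned char)c;
    }

    void *memset_entry_sketch(void *s, int c, unsigned long cnt)
    {
        if (cnt == 0)                      /* cmp.eq + br.ret: count = 0  */
            return s;
        if (16 > cnt) {                    /* cmp.gt p_scr, p0 = 16, cnt  */
            move_bytes_unaligned(s, c, cnt);   /* "minimalistic task"     */
            return s;
        }
        /* ... fall through to the aligned / line-sized paths below ...   */
        return s;
    }
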
118 cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
121 (p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few
137 cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
140 (p_scr) add loopcnt = -1, linecnt //
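
Lines 137/140 (repeated at 207/210 in the zero-fill path) clamp the prefetch loop counter: loopcnt is preloaded with a default in code not shown here (PREF_AHEAD-1 is assumed, the usual structure for this pattern), and the predicated add lowers it to linecnt-1 when fewer than PREF_AHEAD lines remain. In effect, a branchless min:

    /* Hedged sketch: loopcnt = min(PREF_AHEAD, linecnt) - 1, computed
     * with a predicated add instead of a branch. The PREF_AHEAD-1
     * preload is an assumption; the listing shows only the cmp/add. */
    long clamp_prefetch_count(long linecnt, long pref_ahead)
    {
        long loopcnt = pref_ahead - 1;        /* assumed default preload   */
        int p_scr = (pref_ahead > linecnt);   /* cmp.gt p_scr, p0 = PREF_AHEAD, linecnt */
        if (p_scr)
            loopcnt = linecnt - 1;            /* (p_scr) add loopcnt = -1, linecnt */
        return loopcnt;
    }
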
186 cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
190 (p_scr) stf8 [ptr9] = fvalue, 128
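
Lines 186/190 are the store-ahead step: while the prefetch pointer ptr9 still trails the limit ptr1, one stf8 per iteration touches a point 128 bytes ahead so that cache line is allocated before the bulk stores reach it. Lines 240/244 do the same in the zero-fill path with stf.spill [ptr9] = f0, which writes 16 zero bytes from the always-zero register f0. A hedged C rendering of the guarded step:

    /* Hedged sketch: one predicated prefetch-store with 128-byte stride.
     * fvalue is the fill byte replicated across 8 bytes (done elsewhere). */
    void prefetch_store_step(unsigned long long **ptr9,
                             unsigned long long *ptr1,
                             unsigned long long fvalue)
    {
        int p_scr = (*ptr9 < ptr1);    /* cmp.lt p_scr, p0 = ptr9, ptr1   */
        if (p_scr) {
            **ptr9 = fvalue;           /* (p_scr) stf8 [ptr9] = fvalue    */
            *ptr9 += 128 / sizeof(unsigned long long);   /* ", 128"       */
        }
    }
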
194 cmp.le p_scr, p0 = 8, cnt // just a few bytes left ?
195 (p_scr) br.cond.dpnt.many .fraction_of_line // Branch no. 2
207 cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
210 (p_scr) add loopcnt = -1, linecnt
240 cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
244 (p_scr) stf.spill [ptr9] = f0, 128
248 cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
249 (p_scr) br.cond.dpnt.many .move_bytes_from_alignment //
258 cmp.eq p_scr, p0 = loopcnt, r0
260 (p_scr) br.cond.dpnt.many .store_words
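
Lines 258/260 guard a counted loop: when the computed trip count is already zero, branch straight to .store_words instead of entering the loop. A sketch; the trip-count setup and loop body are assumptions, since only the guard matches p_scr:

    /* Hedged sketch: zero-trip guard in front of an unrolled store loop. */
    void fraction_sketch(unsigned long loopcnt)
    {
        int p_scr = (loopcnt == 0);    /* cmp.eq p_scr, p0 = loopcnt, r0  */
        if (p_scr)
            goto store_words;          /* (p_scr) br.cond .store_words    */
        do {
            /* ... unrolled multi-byte stores (assumed body) ...          */
        } while (--loopcnt != 0);
    store_words:
        ;                              /* word/byte tail handled here     */
    }
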
278 cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
279 (p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch
299 cmp.eq p_scr, p0 = cnt, r0
301 (p_scr) br.cond.dpnt.few .restore_and_exit
346 tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ?
360 (p_scr) st2 [ptr1] = value // fill 2 (aligned) bytes
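
Lines 346/360 finish the tail: tbit.nz copies bit 1 of the residual count into p_scr, and if it is set, one aligned 2-byte store is issued. The surrounding code presumably tests the other low bits the same way with other predicates, which is why only this pair matches p_scr (an assumption; those lines are not in this listing). In C:

    #include <string.h>

    /* Hedged sketch: predicated 2-byte tail store. 'value2' is the fill
     * byte replicated into 16 bits (hypothetical parameter). Whether
     * ptr1 advances afterwards is not shown in the listing. */
    void store_tail2(unsigned char *ptr1, unsigned long cnt,
                     unsigned short value2)
    {
        int p_scr = (cnt >> 1) & 1;     /* tbit.nz p_scr, p0 = cnt, 1     */
        if (p_scr)
            memcpy(ptr1, &value2, 2);   /* (p_scr) st2 [ptr1] = value     */
    }
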