Lines matching full:hits
36 u32 pc, hits; member
142 * profile hits. read_profile() IPI's all cpus to request them
147 * profile hits required for the accuracy of reported profile hits
151 * and hold the number of pending hits to that profile buffer slot on
152 * a cpu in an entry. When the hashtable overflows, all pending hits
154 * atomic_add() and the hashtable emptied. As numerous pending hits
159 * positions to which hits are accounted during short intervals (e.g.
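The fragments above come from the comment block that documents the scheme: each CPU keeps a pair of small open-addressed hashtables of pending {pc, hits} pairs, and those pending counts are folded into the shared prof_buffer with atomic_add() when a table overflows or when read_profile() IPIs every CPU to flip buffers. Below is a minimal userspace sketch of that layout, with illustrative sizes standing in for the kernel's real NR_PROFILE_HIT and prof_len, and plain arrays standing in for the per-cpu allocations.

/* Sketch only: illustrative sizes, plain arrays instead of per-cpu data. */
#include <stdatomic.h>
#include <stdint.h>

struct profile_hit {
	uint32_t pc;    /* index into prof_buffer */
	uint32_t hits;  /* pending hit count for that index */
};

enum {
	NR_CPUS        = 4,    /* illustrative */
	NR_PROFILE_HIT = 256,  /* entries per pending-hit table (illustrative) */
	PROF_LEN       = 4096, /* slots in the shared profile buffer (illustrative) */
};

/* Shared result buffer: one atomic counter per profiled code position. */
static atomic_uint prof_buffer[PROF_LEN];

/* Each CPU owns a pair of pending-hit tables; cpu_profile_flip selects the
 * table that hit recording currently writes into while the other is drained. */
static struct profile_hit cpu_profile_hits[NR_CPUS][2][NR_PROFILE_HIT];
static int cpu_profile_flip[NR_CPUS];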
187 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; in profile_flip_buffers() local
189 if (!hits[i].hits) { in profile_flip_buffers()
190 if (hits[i].pc) in profile_flip_buffers()
191 hits[i].pc = 0; in profile_flip_buffers()
194 atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); in profile_flip_buffers()
195 hits[i].hits = hits[i].pc = 0; in profile_flip_buffers()
210 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; in profile_discard_flip_buffers() local
211 memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit)); in profile_discard_flip_buffers()
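The loops excerpted above (lines 189-195 and 210-211) are the read side: drain one table of each CPU into prof_buffer, or throw the pending hits away. Below is a sketch of both operations, reusing the declarations from the previous sketch; flip_and_drain and discard_hits are stand-in names for profile_flip_buffers()/profile_discard_flip_buffers(), and the kernel additionally drives the flip through an IPI to every CPU and serializes callers with profile_flip_mutex, which the sketch omits.

#include <string.h>

/* Retire one CPU's active pending-hit table and fold it into prof_buffer. */
static void flip_and_drain(int cpu)
{
	struct profile_hit *hits;
	int i;

	cpu_profile_flip[cpu] = !cpu_profile_flip[cpu];        /* switch write table   */
	hits = cpu_profile_hits[cpu][!cpu_profile_flip[cpu]];  /* drain the retired one */

	for (i = 0; i < NR_PROFILE_HIT; i++) {
		if (!hits[i].hits) {
			if (hits[i].pc)    /* clear a stale pc so the slot reads as empty */
				hits[i].pc = 0;
			continue;
		}
		atomic_fetch_add(&prof_buffer[hits[i].pc], hits[i].hits);
		hits[i].hits = hits[i].pc = 0;
	}
}

/* Discard pending hits instead of accounting them (profile reset path). */
static void discard_hits(int cpu)
{
	memset(cpu_profile_hits[cpu], 0, sizeof(cpu_profile_hits[cpu]));
}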
220 struct profile_hit *hits; in do_profile_hits() local
226 hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)]; in do_profile_hits()
227 if (!hits) { in do_profile_hits()
239 if (hits[i + j].pc == pc) { in do_profile_hits()
240 hits[i + j].hits += nr_hits; in do_profile_hits()
242 } else if (!hits[i + j].hits) { in do_profile_hits()
243 hits[i + j].pc = pc; in do_profile_hits()
244 hits[i + j].hits = nr_hits; in do_profile_hits()
257 atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); in do_profile_hits()
258 hits[i].pc = hits[i].hits = 0; in do_profile_hits()
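The do_profile_hits() excerpts are the write side: pc is hashed to a group of slots, a matching entry coalesces the new hits, an empty entry is claimed, and only when every probed slot is occupied does the function account the hits directly and flush the whole table (the atomic_add loop at lines 257-258). Below is a sketch of that insert-or-coalesce probe against the layout above; record_hits is a stand-in name, and a simple linear probe replaces the kernel's grouped primary/secondary hashing.

/* Record nr_hits for profile slot pc in one CPU's active pending table. */
static void record_hits(int cpu, uint32_t pc, uint32_t nr_hits)
{
	struct profile_hit *hits =
		cpu_profile_hits[cpu][cpu_profile_flip[cpu]];
	int start = pc % NR_PROFILE_HIT;
	int probes = 8;                          /* illustrative probe limit */
	int j;

	for (j = 0; j < probes; j++) {
		int k = (start + j) % NR_PROFILE_HIT;

		if (hits[k].pc == pc) {              /* coalesce into an existing entry */
			hits[k].hits += nr_hits;
			return;
		} else if (!hits[k].hits) {          /* claim an empty slot */
			hits[k].pc = pc;
			hits[k].hits = nr_hits;
			return;
		}
	}

	/* No room: account the new hits directly and flush every pending entry
	 * into the shared buffer, leaving the table empty again. */
	atomic_fetch_add(&prof_buffer[pc], nr_hits);
	for (j = 0; j < NR_PROFILE_HIT; j++) {
		atomic_fetch_add(&prof_buffer[hits[j].pc], hits[j].hits);
		hits[j].pc = hits[j].hits = 0;
	}
}

In the kernel this all runs with interrupts disabled on the local CPU (local_irq_save()) and with the task pinned via get_cpu()/put_cpu(); the sketch drops that plumbing along with the group hashing.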