Lines Matching full:pg
3 * Infrastructure for profiling code inserted by 'gcc -pg'.
429 struct ftrace_profile_page *pg; in function_stat_next() local
431 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); in function_stat_next()
437 if ((void *)rec >= (void *)&pg->records[pg->index]) { in function_stat_next()
438 pg = pg->next; in function_stat_next()
439 if (!pg) in function_stat_next()
441 rec = &pg->records[0]; in function_stat_next()
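The function_stat_next() hits at 429-441 rely on each ftrace_profile_page being a single page-sized allocation whose header sits at the start of the page, so masking a record pointer with PAGE_MASK recovers the page it lives in. A minimal sketch of that walk, reassembled from the hits (simplified; the real function also skips records whose counter is still zero):

static void *profile_stat_next_sketch(struct ftrace_profile *rec)
{
	struct ftrace_profile_page *pg;

	/* The page header lives at the start of the page holding 'rec'. */
	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

	rec++;
	/* Walked past the last used slot on this page? Hop to the next one. */
	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
	}
	return rec;
}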
570 struct ftrace_profile_page *pg; in ftrace_profile_reset() local
572 pg = stat->pages = stat->start; in ftrace_profile_reset()
574 while (pg) { in ftrace_profile_reset()
575 memset(pg->records, 0, PROFILE_RECORDS_SIZE); in ftrace_profile_reset()
576 pg->index = 0; in ftrace_profile_reset()
577 pg = pg->next; in ftrace_profile_reset()
586 struct ftrace_profile_page *pg; in ftrace_profile_pages_init() local
612 pg = stat->start = stat->pages; in ftrace_profile_pages_init()
617 pg->next = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
618 if (!pg->next) in ftrace_profile_pages_init()
620 pg = pg->next; in ftrace_profile_pages_init()
626 pg = stat->start; in ftrace_profile_pages_init()
627 while (pg) { in ftrace_profile_pages_init()
628 unsigned long tmp = (unsigned long)pg; in ftrace_profile_pages_init()
630 pg = pg->next; in ftrace_profile_pages_init()
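The ftrace_profile_reset() and ftrace_profile_pages_init() hits show the profile pages forming a singly linked chain of zeroed pages, with an unwind path that frees the whole chain if any allocation fails. A rough sketch of that allocate-or-unwind pattern (the page count parameter is a stand-in; the real code sizes the chain from the number of functions being profiled):

static struct ftrace_profile_page *alloc_profile_chain(int pages)
{
	struct ftrace_profile_page *start, *pg;
	int i;

	start = pg = (void *)get_zeroed_page(GFP_KERNEL);
	if (!start)
		return NULL;

	/* Chain further zeroed pages off ->next. */
	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}
	return start;

 out_free:
	/* Unwind: free every page already linked into the chain. */
	pg = start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}
	return NULL;
}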
1507 #define do_for_each_ftrace_rec(pg, rec) \ argument
1508 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1510 for (_____i = 0; _____i < pg->index; _____i++) { \
1511 rec = &pg->records[_____i];
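The macro at 1507 is only the opening half of an iterator pair; in the source it is closed by a matching while_for_each_ftrace_rec() that supplies the two closing braces. The many call sites matched below use it roughly like this, with ftrace_lock held:

	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* ... examine or update this record ... */

	} while_for_each_ftrace_rec();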
1532 struct ftrace_page *pg; in lookup_rec() local
1539 for (pg = ftrace_pages_start; pg; pg = pg->next) { in lookup_rec()
1540 if (end < pg->records[0].ip || in lookup_rec()
1541 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) in lookup_rec()
1543 rec = bsearch(&key, pg->records, pg->index, in lookup_rec()
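lookup_rec() can reject a whole ftrace_page with the range test at 1540-1541 because the records inside a page are sorted by ip; only pages that may contain the range are binary-searched. A sketch of how these hits fit together (ftrace_cmp_recs is the comparison helper defined nearby in the same file):

static struct dyn_ftrace *lookup_rec_sketch(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* both fields are unsigned long, so reuse flags */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		/* Sorted records: the first and last ip bound the page. */
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace), ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}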
1642 struct ftrace_page *pg; in __ftrace_hash_rec_update() local
1680 do_for_each_ftrace_rec(pg, rec) { in __ftrace_hash_rec_update()
1866 struct ftrace_page *pg; in __ftrace_hash_update_ipmodify() local
1885 do_for_each_ftrace_rec(pg, rec) { in __ftrace_hash_update_ipmodify()
1911 do_for_each_ftrace_rec(pg, rec) { in __ftrace_hash_update_ipmodify()
2540 struct ftrace_page *pg; in ftrace_replace_code() local
2548 do_for_each_ftrace_rec(pg, rec) { in ftrace_replace_code()
2565 struct ftrace_page *pg; member
2587 iter->pg = ftrace_pages_start; in ftrace_rec_iter_start()
2591 while (iter->pg && !iter->pg->index) in ftrace_rec_iter_start()
2592 iter->pg = iter->pg->next; in ftrace_rec_iter_start()
2594 if (!iter->pg) in ftrace_rec_iter_start()
2610 if (iter->index >= iter->pg->index) { in ftrace_rec_iter_next()
2611 iter->pg = iter->pg->next; in ftrace_rec_iter_next()
2615 while (iter->pg && !iter->pg->index) in ftrace_rec_iter_next()
2616 iter->pg = iter->pg->next; in ftrace_rec_iter_next()
2619 if (!iter->pg) in ftrace_rec_iter_next()
2633 return &iter->pg->records[iter->index]; in ftrace_rec_iter_record()
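The iter->pg member together with the start/next/record hits forms the record iterator that arch code uses while patching call sites; <linux/ftrace.h> also provides a for_ftrace_rec_iter() wrapper around start/next. A hypothetical consumer loop (the function name is made up; arch implementations of ftrace_replace_code() follow this shape):

static void patch_all_records(void)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		/* ... compute and apply the text patch for rec->ip ... */
	}
}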
2965 struct ftrace_page *pg; in ftrace_shutdown() local
2968 do_for_each_ftrace_rec(pg, rec) { in ftrace_shutdown()
3096 struct ftrace_page *pg; in ftrace_update_code() local
3119 for (pg = new_pgs; pg; pg = pg->next) { in ftrace_update_code()
3121 for (i = 0; i < pg->index; i++) { in ftrace_update_code()
3127 p = &pg->records[i]; in ftrace_update_code()
3149 static int ftrace_allocate_records(struct ftrace_page *pg, int count) in ftrace_allocate_records() argument
3169 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); in ftrace_allocate_records()
3171 if (!pg->records) { in ftrace_allocate_records()
3183 pg->size = cnt; in ftrace_allocate_records()
3195 struct ftrace_page *pg; in ftrace_allocate_pages() local
3202 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); in ftrace_allocate_pages()
3203 if (!pg) in ftrace_allocate_pages()
3212 cnt = ftrace_allocate_records(pg, num_to_init); in ftrace_allocate_pages()
3220 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); in ftrace_allocate_pages()
3221 if (!pg->next) in ftrace_allocate_pages()
3224 pg = pg->next; in ftrace_allocate_pages()
3230 pg = start_pg; in ftrace_allocate_pages()
3231 while (pg) { in ftrace_allocate_pages()
3232 order = get_count_order(pg->size / ENTRIES_PER_PAGE); in ftrace_allocate_pages()
3233 free_pages((unsigned long)pg->records, order); in ftrace_allocate_pages()
3234 start_pg = pg->next; in ftrace_allocate_pages()
3235 kfree(pg); in ftrace_allocate_pages()
3236 pg = start_pg; in ftrace_allocate_pages()
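In ftrace_allocate_records()/ftrace_allocate_pages(), pg->records is a multi-page __get_free_pages() buffer and pg->size holds how many dyn_ftrace entries fit in it, which is why every free path (here at 3232-3233, later at 6421-6422 and 6781-6782) recomputes the allocation order from pg->size / ENTRIES_PER_PAGE before calling free_pages(). The failure path at 3230-3236, reassembled:

	/* Unwind a partially built chain: free each record buffer at the
	 * order it was allocated with, then the ftrace_page header itself. */
	pg = start_pg;
	while (pg) {
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}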
3250 struct ftrace_page *pg; member
3480 if (iter->idx >= iter->pg->index) { in t_func_next()
3481 if (iter->pg->next) { in t_func_next()
3482 iter->pg = iter->pg->next; in t_func_next()
3487 rec = &iter->pg->records[iter->idx++]; in t_func_next()
3586 iter->pg = ftrace_pages_start; in t_start()
3707 iter->pg = ftrace_pages_start; in ftrace_avail_open()
3731 iter->pg = ftrace_pages_start; in ftrace_enabled_open()
3815 iter->pg = ftrace_pages_start; in ftrace_regex_open()
3943 struct ftrace_page *pg; in add_rec_by_index() local
3950 do_for_each_ftrace_rec(pg, rec) { in add_rec_by_index()
3951 if (pg->index <= index) { in add_rec_by_index()
3952 index -= pg->index; in add_rec_by_index()
3956 rec = &pg->records[index]; in add_rec_by_index()
4004 struct ftrace_page *pg; in match_records() local
4036 do_for_each_ftrace_rec(pg, rec) { in match_records()
5971 struct ftrace_page *pg; in ftrace_graph_set_hash() local
5989 do_for_each_ftrace_rec(pg, rec) { in ftrace_graph_set_hash()
6144 struct ftrace_page *pg; in ftrace_process_locs() local
6189 pg = start_pg; in ftrace_process_locs()
6201 if (pg->index == pg->size) { in ftrace_process_locs()
6203 if (WARN_ON(!pg->next)) in ftrace_process_locs()
6205 pg = pg->next; in ftrace_process_locs()
6208 rec = &pg->records[pg->index++]; in ftrace_process_locs()
6213 WARN_ON(pg->next); in ftrace_process_locs()
6216 ftrace_pages = pg; in ftrace_process_locs()
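ftrace_process_locs() walks the mcount/fentry location table produced at build time and appends one dyn_ftrace record per call site, spilling into the next pre-allocated ftrace_page when the current one fills up; the last page written becomes the new ftrace_pages tail (6216). A sketch of that fill loop, with 'p'/'end' standing for the cursor and end of the address table and ftrace_call_adjust() the arch hook that normalizes each address:

	pg = start_pg;
	while (p < end) {
		unsigned long addr = ftrace_call_adjust(*p++);

		/* Linkers may pad the table with zeroed entries; skip them. */
		if (!addr)
			continue;

		if (pg->index == pg->size) {
			/* The chain was sized up front, so ->next must exist. */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
	}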
6307 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) in clear_mod_from_hash() argument
6316 for (i = 0; i < pg->index; i++) { in clear_mod_from_hash()
6317 rec = &pg->records[i]; in clear_mod_from_hash()
6330 static void clear_mod_from_hashes(struct ftrace_page *pg) in clear_mod_from_hashes() argument
6339 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); in clear_mod_from_hashes()
6340 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); in clear_mod_from_hashes()
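clear_mod_from_hash() runs while a module's pages are being torn down: every record on the page is looked up in the given filter/notrace hash and, if found, its entry is neutralized so the stale address can never match again. A rough reconstruction of that loop (ftrace_hash_empty() and __ftrace_lookup_ip() are helpers from the same file; the entry is not unlinked, its ip is simply zeroed):

static void clear_mod_from_hash_sketch(struct ftrace_page *pg, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct dyn_ftrace *rec;
	int i;

	if (ftrace_hash_empty(hash))
		return;

	for (i = 0; i < pg->index; i++) {
		rec = &pg->records[i];
		entry = __ftrace_lookup_ip(hash, rec->ip);
		/* Keep the stale entry but make it unmatchable. */
		if (entry)
			entry->ip = 0;
	}
}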
6369 struct ftrace_page *pg; in ftrace_release_mod() local
6390 for (pg = ftrace_pages_start; pg; pg = *last_pg) { in ftrace_release_mod()
6391 rec = &pg->records[0]; in ftrace_release_mod()
6398 if (WARN_ON(pg == ftrace_pages_start)) in ftrace_release_mod()
6402 if (pg == ftrace_pages) in ftrace_release_mod()
6405 ftrace_update_tot_cnt -= pg->index; in ftrace_release_mod()
6406 *last_pg = pg->next; in ftrace_release_mod()
6408 pg->next = tmp_page; in ftrace_release_mod()
6409 tmp_page = pg; in ftrace_release_mod()
6411 last_pg = &pg->next; in ftrace_release_mod()
6416 for (pg = tmp_page; pg; pg = tmp_page) { in ftrace_release_mod()
6419 clear_mod_from_hashes(pg); in ftrace_release_mod()
6421 order = get_count_order(pg->size / ENTRIES_PER_PAGE); in ftrace_release_mod()
6422 free_pages((unsigned long)pg->records, order); in ftrace_release_mod()
6423 tmp_page = pg->next; in ftrace_release_mod()
6424 kfree(pg); in ftrace_release_mod()
6433 struct ftrace_page *pg; in ftrace_module_enable() local
6456 do_for_each_ftrace_rec(pg, rec) { in ftrace_module_enable()
6460 * module text shares the pg. If a record is in ftrace_module_enable()
6461 * not part of this module, then skip this pg, in ftrace_module_enable()
6737 struct ftrace_page *pg; in ftrace_free_mem() local
6760 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { in ftrace_free_mem()
6761 if (end < pg->records[0].ip || in ftrace_free_mem()
6762 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) in ftrace_free_mem()
6765 rec = bsearch(&key, pg->records, pg->index, in ftrace_free_mem()
6777 pg->index--; in ftrace_free_mem()
6779 if (!pg->index) { in ftrace_free_mem()
6780 *last_pg = pg->next; in ftrace_free_mem()
6781 order = get_count_order(pg->size / ENTRIES_PER_PAGE); in ftrace_free_mem()
6782 free_pages((unsigned long)pg->records, order); in ftrace_free_mem()
6785 kfree(pg); in ftrace_free_mem()
6786 pg = container_of(last_pg, struct ftrace_page, next); in ftrace_free_mem()
6788 ftrace_pages = pg; in ftrace_free_mem()
6792 (pg->index - (rec - pg->records)) * sizeof(*rec)); in ftrace_free_mem()
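These last hits show how ftrace_free_mem() drops a single record whose ip falls inside freed (e.g. __init) memory: the page's count is decremented, a page that becomes empty is unlinked and freed at its original order, and otherwise the remaining records are slid down over the hole so the page stays sorted and dense. A simplified sketch of that branch (the real function also adjusts ftrace_pages and the global record count, and rescans because more than one function may sit in the freed range):

	pg->index--;
	if (!pg->index) {
		/* Page is now empty: unlink it and return its memory. */
		*last_pg = pg->next;
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		kfree(pg);
		pg = container_of(last_pg, struct ftrace_page, next);
	} else {
		/* Slide the remaining records down over the removed slot. */
		memmove(rec, rec + 1,
			(pg->index - (rec - pg->records)) * sizeof(*rec));
	}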