| /Linux-v6.1/tools/perf/util/ |
| D | bpf_lock_contention.c |
|     40 bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries); in lock_contention_prepare()
|     41 bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries); in lock_contention_prepare()
|     48 bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus); in lock_contention_prepare()
|     49 bpf_map__set_max_entries(skel->maps.task_filter, ntasks); in lock_contention_prepare()
|
| D | bpf_counter_cgroup.c |
|     68 bpf_map__set_max_entries(skel->maps.events, map_size); in bperf_load_program()
|     69 bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups); in bperf_load_program()
|     72 bpf_map__set_max_entries(skel->maps.prev_readings, map_size); in bperf_load_program()
|     75 bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size); in bperf_load_program()
|
| D | bpf_off_cpu.c |
|     147 bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus); in off_cpu_prepare()
|     172 bpf_map__set_max_entries(skel->maps.task_filter, ntasks); in off_cpu_prepare()
|     175 bpf_map__set_max_entries(skel->maps.task_filter, ntasks); in off_cpu_prepare()
|     177 bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC); in off_cpu_prepare()
|     182 bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps); in off_cpu_prepare()
|
| D | bpf_counter.c |
|     130 bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel)); in bpf_program_profiler_load_one()
|     131 bpf_map__set_max_entries(skel->maps.fentry_readings, 1); in bpf_program_profiler_load_one()
|     132 bpf_map__set_max_entries(skel->maps.accum_readings, 1); in bpf_program_profiler_load_one()
|     425 bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus()); in bperf_reload_leader_program()
|     545 bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings, in bperf__load()
|     548 bpf_map__set_max_entries(evsel->follower_skel->maps.filter, in bperf__load()
|
| D | bpf_ftrace.c |
|     42 bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus); in perf_ftrace__latency_prepare_bpf()
|     47 bpf_map__set_max_entries(skel->maps.task_filter, ntasks); in perf_ftrace__latency_prepare_bpf()
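All of the perf entries above follow the same open/resize/load pattern: the BPF skeleton is opened, the relevant maps are resized with bpf_map__set_max_entries() to match the runtime CPU/task counts, and only then is the skeleton loaded. Below is a minimal sketch of that pattern; the skeleton name my_filter and its map names are made up for illustration and are not taken from the tree.

#include <bpf/libbpf.h>
#include "my_filter.skel.h"	/* hypothetical skeleton generated by bpftool gen skeleton */

/* Sketch only: resize filter maps between open and load, as the perf
 * tools above do in their *_prepare() helpers. */
static struct my_filter_bpf *prepare_skel(int ncpus, int ntasks)
{
	struct my_filter_bpf *skel;

	skel = my_filter_bpf__open();		/* open, but do not load yet */
	if (!skel)
		return NULL;

	/* max_entries can only be changed before the maps are created */
	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);

	if (my_filter_bpf__load(skel)) {	/* maps are created with the new sizes */
		my_filter_bpf__destroy(skel);
		return NULL;
	}
	return skel;
}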
|
| /Linux-v6.1/tools/testing/selftests/bpf/prog_tests/ |
| D | ringbuf_multi.c |
|     55 ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size + 1), "rb1_resize"); in test_ringbuf_multi()
|     57 ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size), "rb1_reset"); in test_ringbuf_multi()
|     76 …if (!ASSERT_ERR(bpf_map__set_max_entries(skel->maps.ringbuf1, 3 * page_size), "rb1_resize_after_lo… in test_ringbuf_multi()
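The ringbuf_multi.c matches illustrate the ordering constraint directly: resizing a ring buffer map succeeds before the object is loaded and is rejected afterwards. A hedged sketch of the same behaviour using plain libbpf calls follows; the object file name and map name are assumptions for illustration, not the selftest's own.

#include <unistd.h>
#include <bpf/libbpf.h>

/* Sketch: resize a ring buffer map before load; expect failure after load. */
static int ringbuf_resize_demo(void)
{
	long page_size = sysconf(_SC_PAGE_SIZE);
	struct bpf_object *obj;
	struct bpf_map *rb;
	int err = -1;

	obj = bpf_object__open_file("ringbuf.bpf.o", NULL);	/* hypothetical object */
	if (!obj)
		return -1;

	rb = bpf_object__find_map_by_name(obj, "ringbuf");	/* hypothetical map name */
	if (!rb)
		goto out;

	/* Still unloaded: the new size is applied to the map definition. */
	err = bpf_map__set_max_entries(rb, page_size);
	if (err)
		goto out;

	err = bpf_object__load(obj);
	if (err)
		goto out;

	/* The map now exists in the kernel, so a further resize fails. */
	err = bpf_map__set_max_entries(rb, 2 * page_size);

out:
	bpf_object__close(obj);
	return err;
}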
|
| D | dynptr.c |
|     73 bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); in verify_fail()
|     100 bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); in verify_success()
|
| D | kfunc_dynptr_param.c |
|     66 bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); in verify_fail()
|     109 bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); in verify_success()
|
| D | mmap.c |
|     36 err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size); in test_mmap()
|     41 err = bpf_map__set_max_entries(skel->maps.data_map, in test_mmap()
|
| D | user_ringbuf.c |
|     81 err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size); in open_load_ringbuf_skel()
|     85 err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size); in open_load_ringbuf_skel()
|     722 bpf_map__set_max_entries(skel->maps.user_ringbuf, getpagesize()); in verify_fail()
|
| D | map_init.c |
|     52 err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz); in setup()
|
| D | lookup_and_delete.c |
|     58 err = bpf_map__set_max_entries(skel->maps.hash_map, MAX_ENTRIES); in setup_prog()
|
| /Linux-v6.1/tools/testing/selftests/bpf/benchs/ |
| D | bench_bloom_filter_map.c |
|     272 bpf_map__set_max_entries(skel->maps.hashmap, args.nr_entries); in setup_skeleton()
|     274 bpf_map__set_max_entries(skel->maps.array_map, args.nr_entries); in setup_skeleton()
|     276 bpf_map__set_max_entries(skel->maps.bloom_map, args.nr_entries); in setup_skeleton()
|
| D | bench_ringbufs.c |
|     154 bpf_map__set_max_entries(skel->maps.ringbuf, args.ringbuf_sz); in ringbuf_setup_skeleton()
|
| /Linux-v6.1/samples/bpf/ |
| D | xdp_redirect_cpu_user.c |
|     364 if (bpf_map__set_max_entries(skel->maps.cpu_map, n_cpus) < 0) { in main()
|     371 if (bpf_map__set_max_entries(skel->maps.cpus_available, n_cpus) < 0) { in main()
|
| D | map_perf_test_user.c |
|     415 bpf_map__set_max_entries(map, num_map_entries); in fixup_map()
|
| D | xdp_sample_user.c |
|     1220 if (bpf_map__set_max_entries(sample_map[i], sample_map_count[i]) < 0) in sample_setup_maps()
|
| /Linux-v6.1/tools/lib/bpf/ |
| D | libbpf.map |
|     188 bpf_map__set_max_entries;
|
| D | libbpf.h |
|     791 LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
|
| D | libbpf.c |
|     4382 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) in bpf_map__set_max_entries() function
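The declaration in libbpf.h and the definition in libbpf.c above are the whole public surface: the setter takes a map handle plus the new max_entries and returns 0 on success or a negative error. A short, hedged reminder of the contract; the helper below is illustrative and not part of libbpf, and the -EBUSY interpretation is based on reading the v6.1 libbpf source rather than a documented guarantee.

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Illustrative helper: the setter only succeeds while the map has not yet
 * been created in the kernel, i.e. between bpf_object__open() and
 * bpf_object__load(). */
static int try_resize(struct bpf_map *map, __u32 nr_entries)
{
	int err = bpf_map__set_max_entries(map, nr_entries);

	if (err == -EBUSY)
		fprintf(stderr, "map already created; resize it before load\n");
	return err;
}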
|
| /Linux-v6.1/tools/bpf/bpftool/ |
| D | prog.c |
|     2273 bpf_map__set_max_entries(profile_obj->maps.events, num_metric * num_cpu); in do_profile()
|     2274 bpf_map__set_max_entries(profile_obj->maps.fentry_readings, num_metric); in do_profile()
|     2275 bpf_map__set_max_entries(profile_obj->maps.accum_readings, num_metric); in do_profile()
|     2276 bpf_map__set_max_entries(profile_obj->maps.counts, 1); in do_profile()
|