/Linux-v5.4/tools/testing/selftests/bpf/ |
D | test_lru_map.c |
     29  static int create_map(int map_type, int map_flags, unsigned int size)  in create_map() argument
     34  sizeof(unsigned long long), size, map_flags);  in create_map()
    159  static void test_lru_sanity0(int map_type, int map_flags)  in test_lru_sanity0() argument
    166  map_flags);  in test_lru_sanity0()
    170  if (map_flags & BPF_F_NO_COMMON_LRU)  in test_lru_sanity0()
    171  lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);  in test_lru_sanity0()
    173  lru_map_fd = create_map(map_type, map_flags, 2);  in test_lru_sanity0()
    248  static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)  in test_lru_sanity1() argument
    256  if (map_flags & BPF_F_NO_COMMON_LRU)  in test_lru_sanity1()
    261  map_flags);  in test_lru_sanity1()
    [all …]
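The hits at lines 170-171 size the LRU map to 2 * nr_cpus entries when BPF_F_NO_COMMON_LRU is set, because that flag gives every CPU its own private LRU list. A minimal user-space sketch of the same sizing decision, assuming the tools/lib/bpf bpf_create_map() wrapper; the helper name and key/value sizes are illustrative, not taken from the selftest:

    #include <unistd.h>          /* sysconf() */
    #include <linux/bpf.h>
    #include <bpf/bpf.h>         /* bpf_create_map() from tools/lib/bpf */

    /* Illustrative helper: scale an LRU hash map's capacity by the CPU count
     * when the per-CPU LRU flag is requested, since BPF_F_NO_COMMON_LRU gives
     * every CPU its own LRU list and therefore its own share of entries. */
    static int create_lru_map(__u32 map_flags, unsigned int base_entries)
    {
        unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
        unsigned int entries = base_entries;

        if (map_flags & BPF_F_NO_COMMON_LRU)
            entries = base_entries * nr_cpus;

        return bpf_create_map(BPF_MAP_TYPE_LRU_HASH,
                              sizeof(unsigned long long),  /* key   */
                              sizeof(unsigned long long),  /* value */
                              entries, map_flags);
    }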
|
D | test_queue_stack_map.h |
     16  __uint(map_flags, 0);
     24  __uint(map_flags, 0);
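The __uint(map_flags, ...) hits are fields of BTF-defined map declarations. A minimal sketch of such a declaration, assuming the __uint()/SEC() macros from the selftests' bpf_helpers.h; the map name and sizes are assumptions for illustration:

    #include <linux/bpf.h>
    #include "bpf_helpers.h"    /* __uint() map-definition macro, SEC() */

    /* Hypothetical BTF-defined queue map; map_flags is simply one more field
     * of the anonymous struct that describes the map to libbpf. */
    struct {
        __uint(type, BPF_MAP_TYPE_QUEUE);
        __uint(max_entries, 32);
        __uint(map_flags, 0);
        __uint(key_size, 0);               /* queue/stack maps take no key */
        __uint(value_size, sizeof(__u32));
    } example_queue SEC(".maps");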
|
D | test_maps.c |
     35  static int map_flags;  variable
     43  2, map_flags);  in test_hashmap()
    134  2, map_flags);  in test_hashmap_sizes()
    156  sizeof(bpf_percpu(value, 0)), 2, map_flags);  in test_hashmap_percpu()
    258  max_entries, map_flags);  in helper_fill_hashmap()
    261  "err: %s, flags: 0x%x\n", strerror(errno), map_flags);  in helper_fill_hashmap()
    317  old_flags = map_flags;  in test_hashmap_zero_seed()
    318  map_flags |= BPF_F_ZERO_SEED;  in test_hashmap_zero_seed()
    340  map_flags = old_flags;  in test_hashmap_zero_seed()
    538  map_flags);  in test_queuemap()
    [all …]
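The hits at lines 317-340 temporarily OR BPF_F_ZERO_SEED into the global map_flags so the hash map under test gets a deterministic (all-zero) hash seed. A standalone sketch of creating such a map, assuming bpf_create_map() from tools/lib/bpf; sizes are arbitrary:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    /* Illustrative: create a tiny hash map whose hash seed is forced to zero,
     * making bucket placement reproducible from run to run. */
    static int create_zero_seed_hash(void)
    {
        int fd = bpf_create_map(BPF_MAP_TYPE_HASH,
                                sizeof(long long), sizeof(long long),
                                2 /* max_entries */, BPF_F_ZERO_SEED);

        if (fd < 0)
            fprintf(stderr, "map create failed: %s\n", strerror(errno));
        return fd;
    }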
|
/Linux-v5.4/samples/bpf/ |
D | test_lru_dist.c |
    208  static int create_map(int map_type, int map_flags, unsigned int size)  in create_map() argument
    213  sizeof(unsigned long long), size, map_flags);  in create_map()
    310  static void test_parallel_lru_dist(int map_type, int map_flags,  in test_parallel_lru_dist() argument
    317  map_flags);  in test_parallel_lru_dist()
    319  if (map_flags & BPF_F_NO_COMMON_LRU)  in test_parallel_lru_dist()
    320  lru_map_fd = create_map(map_type, map_flags,  in test_parallel_lru_dist()
    323  lru_map_fd = create_map(map_type, map_flags,  in test_parallel_lru_dist()
    335  static void test_lru_loss0(int map_type, int map_flags)  in test_lru_loss0() argument
    344  map_flags);  in test_lru_loss0()
    348  if (map_flags & BPF_F_NO_COMMON_LRU)  in test_lru_loss0()
    [all …]
|
D | map_perf_test_kern.c |
     35  .map_flags = BPF_F_NO_COMMON_LRU,
     43  .map_flags = BPF_F_NUMA_NODE,
     65  .map_flags = BPF_F_NO_PREALLOC,
     73  .map_flags = BPF_F_NO_PREALLOC,
     81  .map_flags = BPF_F_NO_PREALLOC,
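These initializers use the older struct bpf_map_def style of samples/bpf. A sketch of one complete definition, assuming the bpf_map_def layout from the samples' bpf_helpers.h; the map name and sizes are illustrative:

    #include <uapi/linux/bpf.h>
    #include "bpf_helpers.h"    /* struct bpf_map_def, SEC() */

    /* Hypothetical sample-style map: an LRU hash in which every CPU maintains
     * its own LRU list rather than sharing a global one. */
    struct bpf_map_def SEC("maps") lru_hash_map = {
        .type        = BPF_MAP_TYPE_LRU_HASH,
        .key_size    = sizeof(__u32),
        .value_size  = sizeof(long),
        .max_entries = 10000,
        .map_flags   = BPF_F_NO_COMMON_LRU,
    };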
|
D | tcp_dumpstats_kern.c |
     17  __u32 map_flags;  member
     22  .map_flags = BPF_F_NO_PREALLOC,
|
D | bpf_load.h | 15 unsigned int map_flags; member
|
/Linux-v5.4/kernel/bpf/ |
D | hashtab.c |
     76  return !(htab->map.map_flags & BPF_F_NO_PREALLOC);  in htab_is_prealloc()
    165  htab->map.map_flags & BPF_F_NO_COMMON_LRU,  in prealloc_init()
    237  bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);  in htab_map_alloc_check()
    238  bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);  in htab_map_alloc_check()
    239  bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);  in htab_map_alloc_check()
    257  if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||  in htab_map_alloc_check()
    258  !bpf_map_flags_access_ok(attr->map_flags))  in htab_map_alloc_check()
    306  bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);  in htab_map_alloc()
    307  bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);  in htab_map_alloc()
    367  if (htab->map.map_flags & BPF_F_ZERO_SEED)  in htab_map_alloc()
    [all …]
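The htab_map_alloc_check() hits show the usual kernel pattern for vetting map_flags at creation time: mask off everything the map type understands, reject anything left over, then reject contradictory combinations. A user-space sketch of that idiom; the mask and the helper name below are hypothetical, not the kernel's HTAB_CREATE_FLAG_MASK or bpf_map_flags_access_ok():

    #include <stdbool.h>
    #include <linux/bpf.h>

    /* Hypothetical set of creation flags an imaginary map type understands. */
    #define EXAMPLE_CREATE_FLAG_MASK \
        (BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | \
         BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)

    static bool example_map_flags_ok(__u32 map_flags)
    {
        /* Any bit outside the supported mask means the caller asked for
         * something this map type cannot honour. */
        if (map_flags & ~EXAMPLE_CREATE_FLAG_MASK)
            return false;

        /* A map cannot be read-only and write-only for programs at once. */
        if ((map_flags & BPF_F_RDONLY_PROG) && (map_flags & BPF_F_WRONLY_PROG))
            return false;

        return true;
    }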
|
D | map_in_map.c |
     56  inner_map_meta->map_flags = inner_map->map_flags;  in bpf_map_meta_alloc()
     84  meta0->map_flags == meta1->map_flags &&  in bpf_map_meta_equal()
|
D | reuseport_array.c |
    208  u32 map_flags)  in reuseport_array_update_check() argument
    210  if (osk && map_flags == BPF_NOEXIST)  in reuseport_array_update_check()
    213  if (!osk && map_flags == BPF_EXIST)  in reuseport_array_update_check()
    249  void *value, u64 map_flags)  in bpf_fd_reuseport_array_update_elem() argument
    258  if (map_flags > BPF_EXIST)  in bpf_fd_reuseport_array_update_elem()
    288  map_flags);  in bpf_fd_reuseport_array_update_elem()
    304  err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags);  in bpf_fd_reuseport_array_update_elem()
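Here map_flags is the per-element update flag (BPF_ANY / BPF_NOEXIST / BPF_EXIST) that arrives through bpf(BPF_MAP_UPDATE_ELEM), not a creation flag. A sketch of how those flags are used from user space with libbpf's bpf_map_update_elem(); the map fd, index, and socket fd are assumed to come from elsewhere:

    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    /* Illustrative: place a listening socket into slot `index` of a
     * BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, but only if the slot is empty. */
    static int add_reuseport_sock(int map_fd, __u32 index, int sock_fd)
    {
        __u64 value = sock_fd;   /* the value carries a socket fd on update */

        /* BPF_NOEXIST fails with EEXIST if the slot already holds a socket;
         * BPF_EXIST would instead require it to be occupied, and BPF_ANY
         * accepts either state. */
        return bpf_map_update_elem(map_fd, &index, &value, BPF_NOEXIST);
    }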
|
D | devmap.c |
    111  attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)  in dev_map_init_map()
    117  attr->map_flags |= BPF_F_RDONLY_PROG;  in dev_map_init_map()
    605  void *key, void *value, u64 map_flags)  in __dev_map_update_elem() argument
    612  if (unlikely(map_flags > BPF_EXIST))  in __dev_map_update_elem()
    616  if (unlikely(map_flags == BPF_NOEXIST))  in __dev_map_update_elem()
    639  u64 map_flags)  in dev_map_update_elem() argument
    642  map, key, value, map_flags);  in dev_map_update_elem()
    646  void *key, void *value, u64 map_flags)  in __dev_map_hash_update_elem() argument
    655  if (unlikely(map_flags > BPF_EXIST || !ifindex))  in __dev_map_hash_update_elem()
    661  if (old_dev && (map_flags & BPF_NOEXIST))  in __dev_map_hash_update_elem()
    [all …]
|
D | arraymap.c |
     57  attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||  in array_map_alloc_check()
     58  !bpf_map_flags_access_ok(attr->map_flags) ||  in array_map_alloc_check()
    273  u64 map_flags)  in array_map_update_elem() argument
    279  if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))  in array_map_update_elem()
    287  if (unlikely(map_flags & BPF_NOEXIST))  in array_map_update_elem()
    291  if (unlikely((map_flags & BPF_F_LOCK) &&  in array_map_update_elem()
    301  if (map_flags & BPF_F_LOCK)  in array_map_update_elem()
    310  u64 map_flags)  in bpf_percpu_array_update() argument
    318  if (unlikely(map_flags > BPF_EXIST))  in bpf_percpu_array_update()
    326  if (unlikely(map_flags == BPF_NOEXIST))  in bpf_percpu_array_update()
    [all …]
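array_map_update_elem() additionally accepts BPF_F_LOCK in map_flags, which makes the kernel copy the value under the element's struct bpf_spin_lock. A user-space sketch, assuming the map was created with BTF describing a value that embeds a bpf_spin_lock; the struct layout below is an assumption for illustration:

    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    /* Hypothetical value layout: it must match the BTF the map was created
     * with, including the embedded spin lock required for BPF_F_LOCK. */
    struct counter_val {
        struct bpf_spin_lock lock;
        __u64 packets;
        __u64 bytes;
    };

    static int update_locked(int array_fd, __u32 index, __u64 packets, __u64 bytes)
    {
        struct counter_val val = {
            .packets = packets,
            .bytes   = bytes,
        };

        /* With BPF_F_LOCK the kernel takes val.lock around the copy, so a BPF
         * program reading the element under bpf_spin_lock() never observes a
         * half-written value. */
        return bpf_map_update_elem(array_fd, &index, &val, BPF_F_LOCK);
    }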
|
D | xskmap.c |
     92  attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))  in xsk_map_alloc()
    216  u64 map_flags)  in xsk_map_update_elem() argument
    225  if (unlikely(map_flags > BPF_EXIST))  in xsk_map_update_elem()
    258  } else if (old_xs && map_flags == BPF_NOEXIST) {  in xsk_map_update_elem()
    261  } else if (!old_xs && map_flags == BPF_EXIST) {  in xsk_map_update_elem()
|
D | local_storage.c |
    201  void *value, u64 map_flags)  in bpf_percpu_cgroup_storage_update() argument
    209  if (map_flags != BPF_ANY && map_flags != BPF_EXIST)  in bpf_percpu_cgroup_storage_update()
    287  if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||  in cgroup_storage_map_alloc()
    288  !bpf_map_flags_access_ok(attr->map_flags))  in cgroup_storage_map_alloc()
|
D | cpumap.c |
     92  attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)  in cpu_map_alloc()
    469  u64 map_flags)  in cpu_map_update_elem() argument
    479  if (unlikely(map_flags > BPF_EXIST))  in cpu_map_update_elem()
    483  if (unlikely(map_flags == BPF_NOEXIST))  in cpu_map_update_elem()
|
/Linux-v5.4/net/core/ |
D | bpf_sk_storage.c |
    313  u64 map_flags)  in check_flags() argument
    315  if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)  in check_flags()
    319  if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)  in check_flags()
    390  u64 map_flags)  in sk_storage_update() argument
    399  if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||  in sk_storage_update()
    401  unlikely((map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))  in sk_storage_update()
    408  err = check_flags(NULL, map_flags);  in sk_storage_update()
    426  if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {  in sk_storage_update()
    432  err = check_flags(old_sdata, map_flags);  in sk_storage_update()
    456  err = check_flags(old_sdata, map_flags);  in sk_storage_update()
    [all …]
|
/Linux-v5.4/tools/lib/bpf/ |
D | bpf.c |
     89  attr.map_flags = create_attr->map_flags;  in bpf_create_map_xattr()
    105  __u32 map_flags, int node)  in bpf_create_map_node() argument
    111  map_attr.map_flags = map_flags;  in bpf_create_map_node()
    117  map_attr.map_flags |= BPF_F_NUMA_NODE;  in bpf_create_map_node()
    124  int value_size, int max_entries, __u32 map_flags)  in bpf_create_map() argument
    129  map_attr.map_flags = map_flags;  in bpf_create_map()
    139  __u32 map_flags)  in bpf_create_map_name() argument
    145  map_attr.map_flags = map_flags;  in bpf_create_map_name()
    155  __u32 map_flags, int node)  in bpf_create_map_in_map_node() argument
    166  attr.map_flags = map_flags;  in bpf_create_map_in_map_node()
    [all …]
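The hit at line 117 shows that bpf_create_map_node() ORs BPF_F_NUMA_NODE into map_flags on the caller's behalf once a NUMA node is given. A usage sketch; the map type, name, sizes, and node number are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    /* Illustrative: keep a hash map's allocations on NUMA node 0.  The wrapper
     * sets BPF_F_NUMA_NODE and attr.numa_node itself, so the flag does not
     * have to be passed in map_flags explicitly. */
    static int create_numa_local_hash(void)
    {
        return bpf_create_map_node(BPF_MAP_TYPE_HASH, "numa_hash",
                                   sizeof(__u32),   /* key_size    */
                                   sizeof(__u64),   /* value_size  */
                                   4096,            /* max_entries */
                                   0,               /* map_flags   */
                                   0 /* NUMA node */);
    }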
|
D | bpf.h |
     42  __u32 map_flags;  member
     58  int max_entries, __u32 map_flags, int node);
     61  int max_entries, __u32 map_flags);
     63  int value_size, int max_entries, __u32 map_flags);
     67  __u32 map_flags, int node);
     71  __u32 map_flags);
|
D | libbpf_probes.c |
    195  int key_size, value_size, max_entries, map_flags;  in bpf_probe_map_type() local
    203  map_flags = 0;  in bpf_probe_map_type()
    212  map_flags = BPF_F_NO_PREALLOC;  in bpf_probe_map_type()
    229  map_flags = BPF_F_NO_PREALLOC;  in bpf_probe_map_type()
    278  attr.map_flags = map_flags;  in bpf_probe_map_type()
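bpf_probe_map_type() fills in per-type defaults (including map_flags such as BPF_F_NO_PREALLOC where a type requires it) and then attempts a real map creation to learn whether the running kernel supports that type. A usage sketch, assuming libbpf's declared bpf_probe_map_type() API:

    #include <stdio.h>
    #include <linux/bpf.h>
    #include <bpf/libbpf.h>    /* bpf_probe_map_type() */

    int main(void)
    {
        /* True only if the kernel let a throwaway map of this type be
         * created; the probe chooses suitable map_flags internally. */
        if (bpf_probe_map_type(BPF_MAP_TYPE_SK_STORAGE, 0 /* ifindex */))
            printf("BPF_MAP_TYPE_SK_STORAGE is supported\n");
        else
            printf("BPF_MAP_TYPE_SK_STORAGE is not supported\n");
        return 0;
    }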
|
/Linux-v5.4/tools/testing/selftests/bpf/progs/ |
D | sockopt_inherit.c |
     19  __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
     26  __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
     33  __uint(map_flags, BPF_F_NO_PREALLOC);
|
D | test_map_in_map.c |
     11  __uint(map_flags, 0);
     20  __uint(map_flags, 0);
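For map-in-map setups, user space also has to supply a template inner map when the outer map is created; its type, key/value sizes, max_entries, and map_flags become the template that later inner maps must match (see the map_in_map.c hits above). A sketch using libbpf's bpf_create_map_in_map(); names and sizes are illustrative:

    #include <unistd.h>
    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    /* Illustrative: build an array-of-maps whose slots will later hold hash
     * maps compatible with the template created here. */
    static int create_outer_map(void)
    {
        int inner_fd, outer_fd;

        /* Template inner map: its type, key/value sizes, max_entries and
         * map_flags define what every future inner map must look like. */
        inner_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
                                  sizeof(__u32), sizeof(__u32),
                                  128, 0 /* map_flags */);
        if (inner_fd < 0)
            return inner_fd;

        outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
                                         "outer_map",
                                         sizeof(__u32),  /* key_size    */
                                         inner_fd,       /* template fd */
                                         1,              /* max_entries */
                                         0 /* map_flags */);
        close(inner_fd);
        return outer_fd;
    }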
|
/Linux-v5.4/drivers/misc/mic/scif/ |
D | scif_rma.c |
    114  int kernel = SCIF_MAP_KERNEL & pin->map_flags;  in scif_destroy_pinned_pages()
    716  if ((!!(window->pinned_pages->map_flags & SCIF_MAP_KERNEL)) &&  in scif_unregister_window()
   1308  int map_flags, scif_pinned_pages_t *pages)  in __scif_pin_pages() argument
   1319  if (map_flags & ~(SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT))  in __scif_pin_pages()
   1321  ulimit = !!(map_flags & SCIF_MAP_ULIMIT);  in __scif_pin_pages()
   1342  if (map_flags & SCIF_MAP_KERNEL) {  in __scif_pin_pages()
   1355  pinned_pages->map_flags = SCIF_MAP_KERNEL;  in __scif_pin_pages()
   1398  pinned_pages->map_flags = 0;  in __scif_pin_pages()
   1425  int map_flags, scif_pinned_pages_t *pages)  in scif_pin_pages() argument
   1427  return __scif_pin_pages(addr, len, &prot, map_flags, pages);  in scif_pin_pages()
    [all …]
|
/Linux-v5.4/include/linux/ |
D | scif.h |
    549  int prot_flags, int map_flags);
   1093  int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags,
   1183  off_t offset, int map_flags);
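In the SCIF API, map_flags selects address-handling behaviour, e.g. SCIF_MAP_KERNEL when pinning kernel memory. A minimal kernel-client sketch of scif_pin_pages(), assuming a page-aligned buffer supplied by the caller; the helper name is hypothetical:

    #include <linux/scif.h>

    /* Illustrative kernel-side caller: pin a page-aligned kernel buffer so it
     * can later be registered with the remote node.  `buf` and `len` are
     * assumed to be PAGE_SIZE aligned and supplied by the caller. */
    static int pin_kernel_buffer(void *buf, size_t len,
                                 scif_pinned_pages_t *pinned)
    {
        /* SCIF_MAP_KERNEL tells SCIF that `buf` is a kernel virtual address
         * rather than a user-space one. */
        return scif_pin_pages(buf, len,
                              SCIF_PROT_READ | SCIF_PROT_WRITE,
                              SCIF_MAP_KERNEL, pinned);
    }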
|
D | bpf.h |
     87  u32 map_flags;  member
    457  u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);  in bpf_map_flags_to_cap()
    685  void *key, void *value, u64 map_flags);
    688  void *key, void *value, u64 map_flags);
    736  return (attr->map_flags & BPF_F_NUMA_NODE) ?  in bpf_map_attr_numa_node()
   1005  void *value, u64 map_flags);
   1020  u64 map_flags)  in bpf_fd_reuseport_array_update_elem() argument
|
/Linux-v5.4/mm/ |
D | mremap.c |
    508  unsigned long map_flags;  in mremap_to() local
    554  map_flags = MAP_FIXED;  in mremap_to()
    556  map_flags |= MAP_SHARED;  in mremap_to()
    560  map_flags);  in mremap_to()
    701  unsigned long map_flags = 0;  in SYSCALL_DEFINE5() local
    703  map_flags |= MAP_SHARED;  in SYSCALL_DEFINE5()
    708  map_flags);  in SYSCALL_DEFINE5()
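Here map_flags is an mmap-style flag word (MAP_FIXED, MAP_SHARED) that mremap_to() passes to get_unmapped_area() when the caller requested a fixed destination. The user-space operation that drives this path is mremap() with MREMAP_FIXED; a sketch, with the destination address chosen purely for illustration:

    #define _GNU_SOURCE          /* mremap() with MREMAP_FIXED takes 5 args */
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;

        /* An anonymous mapping we will move to a fixed new address. */
        void *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (old == MAP_FAILED)
            return 1;

        /* Destination address chosen purely for illustration. */
        void *want = (void *)0x700000000000UL;

        /* MREMAP_FIXED forces the new placement, which is what sends the
         * kernel through mremap_to() and its MAP_FIXED map_flags. */
        void *new_addr = mremap(old, len, len,
                                MREMAP_MAYMOVE | MREMAP_FIXED, want);
        if (new_addr == MAP_FAILED) {
            perror("mremap");
            return 1;
        }
        printf("moved mapping from %p to %p\n", old, new_addr);
        return 0;
    }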
|