/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32-bit archs, roundup_pow_of_two() with a max_entries that has
	 * the uppermost bit of its u32 range set is undefined behavior,
	 * since it results in 1U << 32, so do the rounding manually here
	 * in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}
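
	/* Worked example (illustrative, not from the original source): with
	 * attr->max_entries == 5, fls_long(4) == 3, so mask64 becomes
	 * (1ULL << 3) - 1 == 7 and index_mask == 7. For an unprivileged map,
	 * max_entries is then rounded up to index_mask + 1 == 8, so any index
	 * the CPU speculates with, once ANDed with index_mask, still lands
	 * inside the allocated region.
	 */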

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	/* Masking the index keeps speculative out-of-bounds loads inside
	 * the allocated array for unprivileged maps; for an index that
	 * passed the bounds check above, the AND is an identity.
	 */
	return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
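
/* For a privileged map with max_entries == 16 and value_size == 8, the
 * sequence above emits roughly the following (an illustrative sketch in
 * BPF assembler mnemonics, not verifier output):
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		// index
 *	if r0 >= 16 goto +3		// out of bounds -> r0 = NULL
 *	r0 <<= 3			// index * elem_size (8)
 *	r0 += r1			// address of the element
 *	goto +1
 *	r0 = 0
 */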

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
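
/* From user space, a lookup on a per-cpu array must supply one slot of
 * round_up(value_size, 8) bytes per possible CPU. A minimal sketch,
 * assuming libbpf and a map with value_size == 4 ('ncpus' is a
 * hypothetical count of possible CPUs, not a name from this file):
 *
 *	__u64 values[ncpus];	// 8 bytes per CPU after rounding
 *	__u32 key = 0;
 *
 *	bpf_map_lookup_elem(map_fd, &key, values);
 *	// values[cpu] now holds that CPU's copy of element 0
 */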

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
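
/* A NULL key (or any out-of-range key) yields index 0, so user space can
 * walk every index with the usual get-next-key loop. A minimal sketch,
 * assuming libbpf:
 *
 *	__u32 key, next;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(fd, NULL, &next); !err;
 *	     err = bpf_map_get_next_key(fd, &key, &next)) {
 *		key = next;
 *		// ... look up / process 'key' ...
 *	}
 *	// the loop ends with errno == ENOENT after the last index
 */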

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space provides round_up(value_size, 8) bytes per possible CPU
	 * that are copied into the per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes are returned,
	 * or zeros that percpu_alloc zero-filled, so no kernel data can leak.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
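
/* The mirror of the lookup layout: user space fills one rounded slot per
 * possible CPU before the update. A minimal sketch, assuming libbpf
 * ('ncpus' is hypothetical, as above):
 *
 *	__u64 values[ncpus];
 *	__u32 key = 0;
 *	int cpu;
 *
 *	for (cpu = 0; cpu < ncpus; cpu++)
 *		values[cpu] = 42;	// per-CPU initial value
 *	bpf_map_update_elem(map_fd, &key, values, BPF_ANY);
 */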

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. Wait for outstanding programs
	 * to complete and then free the array.
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_check_btf = array_map_check_btf,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = bpf_fd_array_map_clear,
	.map_check_btf = map_check_no_btf,
};
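
/* A prog array stores program fds on update and is typically driven by
 * bpf_tail_call() from BPF program context. A minimal sketch, assuming
 * libbpf on the user-space side; prog_array_fd and prog_fd are
 * hypothetical fds:
 *
 *	// user space: slot 0 of the prog array now refers to prog_fd
 *	__u32 key = 0;
 *	bpf_map_update_elem(prog_array_fd, &key, &prog_fd, BPF_ANY);
 *
 *	// BPF side: jump to the program stored at 'key', if any
 *	bpf_tail_call(ctx, &prog_array, key);
 */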

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}
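
/* Creating an array-of-maps requires a template inner map whose fd is
 * passed in attr->inner_map_fd; values written to the outer map are inner
 * map fds. A minimal user-space sketch, assuming libbpf's
 * bpf_create_map_in_map() and hypothetical fds:
 *
 *	int inner_fd = ...;	// an already created BPF_MAP_TYPE_ARRAY
 *	int outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
 *					     "outer", sizeof(__u32),
 *					     inner_fd, 8, 0);
 *	__u32 key = 0;
 *	bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
 */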

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed from the syscall path,
	 * which is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
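
/* Compared with array_map_gen_lookup(), the sequence adds one load that
 * dereferences the stored inner-map pointer plus a NULL check on it. For
 * a privileged map with value_size == 4 (elem_size rounds to 8), roughly
 * (illustrative sketch):
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		// index
 *	if r0 >= max_entries goto +5	// out of bounds -> r0 = NULL
 *	r0 <<= 3
 *	r0 += r1
 *	r0 = *(u64 *)(r0 + 0)		// inner map pointer, may be NULL
 *	if r0 == 0 goto +1
 *	goto +1
 *	r0 = 0
 */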

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};