// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 *   CPU_0 buckets
 *   16 32 64 96 128 192 256 512 1024 2048 4096
 *   ...
 *   CPU_N buckets
 *   16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from cache of the current cpu with irqs disabled.
 * Freeing is always done into bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with extra 8 bytes that contain
 * struct llist_node.
 */
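/* Illustrative usage sketch (not part of the allocator itself): a map
 * implementation with fixed-size elements would typically do
 *
 *	struct bpf_mem_alloc ma;
 *
 *	err = bpf_mem_alloc_init(&ma, sizeof(struct my_elem), false);
 *	...
 *	elem = bpf_mem_cache_alloc(&ma);	// safe in any context, incl. NMI
 *	...
 *	bpf_mem_cache_free(&ma, elem);
 *	...
 *	bpf_mem_alloc_destroy(&ma);
 *
 * where 'struct my_elem' is a made-up element type. The size == 0 mode
 * uses bpf_mem_alloc()/bpf_mem_free() with an explicit size instead.
 */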
#define LLIST_NODE_SZ sizeof(struct llist_node)

/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	3,	/* 16 */
	4,	/* 24 */
	4,	/* 32 */
	5,	/* 40 */
	5,	/* 48 */
	5,	/* 56 */
	5,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	6,	/* 104 */
	6,	/* 112 */
	6,	/* 120 */
	6,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > 4096)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}
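/* Illustrative examples of the mapping above (derived from size_index and
 * the fls() fallback): size 40 -> size_index[4] - 1 == 4, i.e. the 64-byte
 * bucket; size 300 -> fls(299) - 2 == 7, i.e. the 512-byte bucket.
 */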

#define NUM_CACHES 11

struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_llist from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;
	int percpu_size;
	bool draining;
	struct bpf_mem_cache *tgt;

	/* list of objects to be freed after RCU GP */
	struct llist_head free_by_rcu;
	struct llist_node *free_by_rcu_tail;
	struct llist_head waiting_for_gp;
	struct llist_node *waiting_for_gp_tail;
	struct rcu_head rcu;
	atomic_t call_rcu_in_progress;
	struct llist_head free_llist_extra_rcu;

	/* list of objects to be freed after RCU tasks trace GP */
	struct llist_head free_by_rcu_ttrace;
	struct llist_head waiting_for_gp_ttrace;
	struct rcu_head rcu_ttrace;
	atomic_t call_rcu_ttrace_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};

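/* Open-coded, non-atomic variant of llist_del_first(). Safe only because
 * all accesses to free_llist are done with IRQs disabled and serialized by
 * the per-cpu 'active' counter (see the comment in struct bpf_mem_cache).
 */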
static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}

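/* Allocate one unit. For the per-cpu flavor (c->percpu_size != 0) the
 * returned object is a small kmalloc-ed header: the first slot holds the
 * llist_node used by the freelists, obj[1] stores the per-cpu pointer
 * returned by __alloc_percpu_gfp().
 */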
static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
	if (c->percpu_size) {
		void **obj = kmalloc_node(c->percpu_size, flags, node);
		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

		if (!obj || !pptr) {
			free_percpu(pptr);
			kfree(obj);
			return NULL;
		}
		obj[1] = pptr;
		return obj;
	}

	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
#endif

#ifdef CONFIG_MEMCG
	return root_mem_cgroup;
#else
	return NULL;
#endif
}

static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		/* In RT irq_work runs in per-cpu kthread, so disable
		 * interrupts to avoid preemption and interrupts and
		 * reduce the chance of bpf prog executing on this cpu
		 * when active counter is busy.
		 */
		local_irq_save(*flags);
	/* alloc_bulk runs from irq_work which will not preempt a bpf
	 * program that does unit_alloc/unit_free since IRQs are
	 * disabled there. There is no race to increment 'active'
	 * counter. It protects free_llist from corruption in case NMI
	 * bpf prog preempted this loop.
	 */
	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
}

static void dec_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	local_dec(&c->active);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
{
	unsigned long flags;

	inc_active(c, &flags);
	__llist_add(obj, &c->free_llist);
	c->free_cnt++;
	dec_active(c, &flags);
}

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	gfp_t gfp;
	void *obj;
	int i;

	gfp = __GFP_NOWARN | __GFP_ACCOUNT;
	gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL;

	for (i = 0; i < cnt; i++) {
		/*
		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
		 * done only by one CPU == current CPU. Other CPUs might
		 * llist_add() and llist_del_all() in parallel.
		 */
		obj = llist_del_first(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	for (; i < cnt; i++) {
		obj = llist_del_first(&c->waiting_for_gp_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (; i < cnt; i++) {
		/* Allocate, but don't deplete atomic reserves that typical
		 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
		 * will allocate from the current numa node which is what we
		 * want here.
		 */
		obj = __alloc(c, node, gfp);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

static void free_one(void *obj, bool percpu)
{
	if (percpu) {
		free_percpu(((void **)obj)[1]);
		kfree(obj);
		return;
	}

	kfree(obj);
}

static int free_all(struct llist_node *llnode, bool percpu)
{
	struct llist_node *pos, *t;
	int cnt = 0;

	llist_for_each_safe(pos, t, llnode) {
		free_one(pos, percpu);
		cnt++;
	}
	return cnt;
}

static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);

	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
}

static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu_ttrace list.
	 */
	llist_add(llnode, &c->free_by_rcu_ttrace);
}

static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
		if (unlikely(READ_ONCE(c->draining))) {
			llnode = llist_del_all(&c->free_by_rcu_ttrace);
			free_all(llnode, !!c->percpu_size);
		}
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
		llist_add(llnode, &c->waiting_for_gp_ttrace);

	if (unlikely(READ_ONCE(c->draining))) {
		__free_rcu(&c->rcu_ttrace);
		return;
	}

	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
}

static void free_bulk(struct bpf_mem_cache *c)
{
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);

	do {
		inc_active(c, &flags);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		dec_active(c, &flags);
		if (llnode)
			enque_to_free(tgt, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(tgt, llnode);
	do_call_rcu_ttrace(tgt);
}

static void __free_by_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode;

	llnode = llist_del_all(&c->waiting_for_gp);
	if (!llnode)
		goto out;

	llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace);

	/* Objects went through regular RCU GP. Send them to RCU tasks trace */
	do_call_rcu_ttrace(tgt);
out:
	atomic_set(&c->call_rcu_in_progress, 0);
}

static void check_free_by_rcu(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;
	unsigned long flags;

	/* drain free_llist_extra_rcu */
	if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) {
		inc_active(c, &flags);
		llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
			if (__llist_add(llnode, &c->free_by_rcu))
				c->free_by_rcu_tail = llnode;
		dec_active(c, &flags);
	}

	if (llist_empty(&c->free_by_rcu))
		return;

	if (atomic_xchg(&c->call_rcu_in_progress, 1)) {
		/*
		 * Instead of kmalloc-ing new rcu_head and triggering 10k
		 * call_rcu() to hit rcutree.qhimark and force RCU to notice
		 * the overload just ask RCU to hurry up. There could be many
		 * objects in free_by_rcu list.
		 * This hint reduces memory consumption for an artificial
		 * benchmark from 2 Gbyte to 150 Mbyte.
		 */
		rcu_request_urgent_qs_task(current);
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));

	inc_active(c, &flags);
	WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
	c->waiting_for_gp_tail = c->free_by_rcu_tail;
	dec_active(c, &flags);

	if (unlikely(READ_ONCE(c->draining))) {
		free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
		atomic_set(&c->call_rcu_in_progress, 0);
	} else {
		call_rcu_hurry(&c->rcu, __free_by_rcu);
	}
}

static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
	else if (cnt > c->high_watermark)
		free_bulk(c);

	check_free_by_rcu(c);
}

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}

/* For typical bpf map case that uses bpf_mem_cache_alloc and single bucket
 * the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes and
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets are used the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using below heuristic.
 * Initialized, but unused bpf allocator (not bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * Typical case will be between 11K and 116K closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */
static void init_refill_work(struct bpf_mem_cache *c)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
}
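/* For example (derived from the heuristic above), a 512-byte bucket gets
 * low_watermark == 16, high_watermark == 48 and batch == 24, while a
 * 4096-byte bucket gets low == 2, high == 6 and batch == 3.
 */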

static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	/* To avoid consuming memory assume that 1st run of bpf
	 * prog won't be doing more than 4 map_update_elem from
	 * irq disabled region
	 */
	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
}

static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
{
	struct llist_node *first;
	unsigned int obj_size;

	/* For per-cpu allocator, the size of free objects in free list doesn't
	 * match with unit_size and now there is no way to get the size of
	 * per-cpu pointer saved in free object, so just skip the checking.
	 */
	if (c->percpu_size)
		return 0;

	first = c->free_llist.first;
	if (!first)
		return 0;

	obj_size = ksize(first);
	if (obj_size != c->unit_size) {
		WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
			  idx, obj_size, c->unit_size);
		return -EINVAL;
	}
	return 0;
}

/* When size != 0 allocate a single bpf_mem_cache for each cpu.
 * This is typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
	int cpu, i, err, unit_size, percpu_size = 0;
	struct bpf_mem_caches *cc, __percpu *pcc;
	struct bpf_mem_cache *c, __percpu *pc;
	struct obj_cgroup *objcg = NULL;

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;

		if (percpu)
			/* room for llist_node and per-cpu pointer */
			percpu_size = LLIST_NODE_SZ + sizeof(void *);
		else
			size += LLIST_NODE_SZ; /* room for llist_node */
		unit_size = size;

#ifdef CONFIG_MEMCG_KMEM
		if (memcg_bpf_enabled())
			objcg = get_obj_cgroup_from_current();
#endif
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->unit_size = unit_size;
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;
			init_refill_work(c);
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	/* size == 0 && percpu is an invalid combination */
	if (WARN_ON_ONCE(percpu))
		return -EINVAL;

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
	err = 0;
#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			c->tgt = c;

			init_refill_work(c);
			/* Another bpf_mem_cache will be used when allocating
			 * c->unit_size in bpf_mem_alloc(), so don't prefill
			 * this bpf_mem_cache because these free objects will
			 * never be used.
			 */
			if (i != bpf_mem_cache_idx(c->unit_size))
				continue;
			prefill_mem_cache(c, cpu);
			err = check_obj_size(c, i);
			if (err)
				goto out;
		}
	}

out:
	ma->caches = pcc;
	/* refill_work is either zeroed or initialized, so it is safe to
	 * call irq_work_sync().
	 */
	if (err)
		bpf_mem_alloc_destroy(ma);
	return err;
}

static void drain_mem_cache(struct bpf_mem_cache *c)
{
	bool percpu = !!c->percpu_size;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
	free_all(__llist_del_all(&c->free_llist), percpu);
	free_all(__llist_del_all(&c->free_llist_extra), percpu);
	free_all(__llist_del_all(&c->free_by_rcu), percpu);
	free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu);
	free_all(llist_del_all(&c->waiting_for_gp), percpu);
}

static void check_mem_cache(struct bpf_mem_cache *c)
{
	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace));
	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	WARN_ON_ONCE(!llist_empty(&c->free_llist));
	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra));
	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu));
	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu));
	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
}

static void check_leaked_objs(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i;

	if (ma->cache) {
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			check_mem_cache(c);
		}
	}
	if (ma->caches) {
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				check_mem_cache(c);
			}
		}
	}
}

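/* Free the per-cpu cache arrays after checking for leaked objects.
 * Callers must guarantee that no RCU callbacks are still pending, either
 * because none were scheduled or via the barriers in free_mem_alloc().
 */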
static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
	check_leaked_objs(ma);
	free_percpu(ma->cache);
	free_percpu(ma->caches);
	ma->cache = NULL;
	ma->caches = NULL;
}

static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
	/* waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
	 * might still execute. Wait for them.
	 *
	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
	 * so if call_rcu(head, __free_rcu) is skipped due to
	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
	 * using rcu_trace_implies_rcu_gp() as well.
	 */
	rcu_barrier(); /* wait for __free_by_rcu */
	rcu_barrier_tasks_trace(); /* wait for __free_rcu */
	if (!rcu_trace_implies_rcu_gp())
		rcu_barrier();
	free_mem_alloc_no_barrier(ma);
}

static void free_mem_alloc_deferred(struct work_struct *work)
{
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

	free_mem_alloc(ma);
	kfree(ma);
}

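/* Tear down 'ma' either synchronously (no RCU callbacks pending) or by
 * handing a copy of it to a workqueue, so the caller does not have to
 * block on the rcu_barrier()s in free_mem_alloc().
 */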
static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of map memory be freed */
	memset(ma, 0, sizeof(*ma));
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}

void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	if (ma->cache) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			WRITE_ONCE(c->draining, true);
			irq_work_sync(&c->refill_work);
			drain_mem_cache(c);
			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
		}
		/* objcg is the same across cpus */
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
	if (ma->caches) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				WRITE_ONCE(c->draining, true);
				irq_work_sync(&c->refill_work);
				drain_mem_cache(c);
				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
			}
		}
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
}

/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_list access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode) {
			cnt = --c->free_cnt;
			*(struct bpf_mem_cache **)llnode = c;
		}
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	return llnode;
}

/* Though 'ptr' object could have been allocated on a different cpu
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	/*
	 * Remember bpf_mem_cache that allocated this object.
	 * The hint is not accurate.
	 */
	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add here, since
		 * it also can be interrupted by bpf nmi prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	if (cnt > c->high_watermark)
		/* free few objects from current cpu into global kmalloc pool */
		irq_work_raise(c);
}

static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;

	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		if (__llist_add(llnode, &c->free_by_rcu))
			c->free_by_rcu_tail = llnode;
	} else {
		llist_add(llnode, &c->free_llist_extra_rcu);
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	if (!atomic_read(&c->call_rcu_in_progress))
		irq_work_raise(c);
}

/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	int idx;
	void *ret;

	if (!size)
		return ZERO_SIZE_PTR;

	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

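/* The bucket index is recomputed from ksize() of the full object (user
 * pointer minus the llist_node header), so 'ptr' must have been returned
 * by bpf_mem_alloc() of the same allocator.
 */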
void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
	if (idx < 0)
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
	if (idx < 0)
		return;

	unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

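/* Fixed-size variants: the single bucket was sized at bpf_mem_alloc_init()
 * time, so no size-to-bucket lookup is needed on alloc/free.
 */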
void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}

void notrace bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free_rcu(this_cpu_ptr(ma->cache), ptr);
}

/* Directly does a kfree() without putting 'ptr' back to the free_llist
 * for reuse and without waiting for a rcu_tasks_trace gp.
 * The caller must first go through the rcu_tasks_trace gp for 'ptr'
 * before calling bpf_mem_cache_raw_free().
 * It could be used when the rcu_tasks_trace callback does not have
 * a hold on the original bpf_mem_alloc object that allocated the
 * 'ptr'. This should only be used in the uncommon code path.
 * Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
 * and may affect performance.
 */
void bpf_mem_cache_raw_free(void *ptr)
{
	if (!ptr)
		return;

	kfree(ptr - LLIST_NODE_SZ);
}

/* When flags == GFP_KERNEL, it signals that the caller will not cause
 * deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
 * kmalloc if the free_llist is empty.
 */
void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
{
	struct bpf_mem_cache *c;
	void *ret;

	c = this_cpu_ptr(ma->cache);

	ret = unit_alloc(c);
	if (!ret && flags == GFP_KERNEL) {
		struct mem_cgroup *memcg, *old_memcg;

		memcg = get_memcg(c);
		old_memcg = set_active_memcg(memcg);
		ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
		set_active_memcg(old_memcg);
		mem_cgroup_put(memcg);
	}

	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

static int __init bpf_mem_cache_adjust_size(void)
{
	unsigned int size;

	/* Adjusting the indexes in size_index() according to the object_size
	 * of underlying slab cache, so bpf_mem_alloc() will select a
	 * bpf_mem_cache with unit_size equal to the object_size of
	 * the underlying slab cache.
	 *
	 * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
	 * 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
	 */
	for (size = 192; size >= 8; size -= 8) {
		unsigned int kmalloc_size, index;

		kmalloc_size = kmalloc_size_roundup(size);
		if (kmalloc_size == size)
			continue;

		if (kmalloc_size <= 192)
			index = size_index[(kmalloc_size - 1) / 8];
		else
			index = fls(kmalloc_size - 1) - 1;
		/* Only overwrite if necessary */
		if (size_index[(size - 1) / 8] != index)
			size_index[(size - 1) / 8] = index;
	}

	return 0;
}
subsys_initcall(bpf_mem_cache_adjust_size);