Lines matching refs: rcpu (identifier cross-reference, kernel/bpf/cpumap.c)
137 static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) in get_cpu_map_entry() argument
139 atomic_inc(&rcpu->refcnt); in get_cpu_map_entry()
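
The get/put pair here is a plain atomic reference count on each cpumap entry; every holder (the map slot, the kthread) takes its own reference. A minimal userspace sketch of the acquire side, using C11 stdatomic in place of the kernel's atomic_t (struct and function names are illustrative, not the kernel's):

    #include <stdatomic.h>

    /* Illustrative stand-in for struct bpf_cpu_map_entry. */
    struct entry {
        atomic_int refcnt;
        /* queue, prog, kthread, ... */
    };

    static void entry_get(struct entry *e)
    {
        /* Mirrors atomic_inc(&rcpu->refcnt): one increment per holder. */
        atomic_fetch_add_explicit(&e->refcnt, 1, memory_order_relaxed);
    }
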
145 struct bpf_cpu_map_entry *rcpu; in cpu_map_kthread_stop() local
147 rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq); in cpu_map_kthread_stop()
155 kthread_stop(rcpu->kthread); in cpu_map_kthread_stop()
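
cpu_map_kthread_stop() runs from a workqueue so that kthread_stop() may sleep; kthread_stop() sets the thread's stop flag, wakes it, and waits for it to exit (the real code also waits out pending RCU work first). In pthread terms that is a stop flag plus a join; a hedged sketch, names assumed:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool should_stop;

    static void *worker(void *arg)
    {
        (void)arg;
        while (!atomic_load(&should_stop))
            ;                   /* drain the queue; see the kthread loop below */
        return NULL;
    }

    static void stop_worker(pthread_t t)
    {
        /* kthread_stop(), roughly: set the flag, wake, wait for exit. */
        atomic_store(&should_stop, true);
        pthread_join(t, NULL);
    }
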
215 static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) in put_cpu_map_entry() argument
217 if (atomic_dec_and_test(&rcpu->refcnt)) { in put_cpu_map_entry()
218 if (rcpu->prog) in put_cpu_map_entry()
219 bpf_prog_put(rcpu->prog); in put_cpu_map_entry()
221 __cpu_map_ring_cleanup(rcpu->queue); in put_cpu_map_entry()
222 ptr_ring_cleanup(rcpu->queue, NULL); in put_cpu_map_entry()
223 kfree(rcpu->queue); in put_cpu_map_entry()
224 kfree(rcpu); in put_cpu_map_entry()
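
put_cpu_map_entry() drops a reference and, on the final put, tears the entry down in order: release the attached BPF program, drain and clean up the ptr_ring, then free the ring and the entry itself. A hedged sketch of the dec-and-test idiom with C11 atomics (names illustrative):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct entry {
        atomic_int refcnt;
        void *queue;            /* stand-in for rcpu->queue */
    };

    static void entry_put(struct entry *e)
    {
        /* atomic_dec_and_test(): only the caller that takes the count
         * from 1 to 0 sees "true", so exactly one thread runs the
         * teardown path. */
        if (atomic_fetch_sub_explicit(&e->refcnt, 1,
                                      memory_order_acq_rel) == 1) {
            free(e->queue);     /* the kernel drains the ring first */
            free(e);
        }
    }
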
228 static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, in cpu_map_bpf_prog_run_xdp() argument
236 if (!rcpu->prog) in cpu_map_bpf_prog_run_xdp()
255 act = bpf_prog_run_xdp(rcpu->prog, &xdp); in cpu_map_bpf_prog_run_xdp()
269 rcpu->prog); in cpu_map_bpf_prog_run_xdp()
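
cpu_map_bpf_prog_run_xdp() runs the entry's XDP program over the whole consumed batch and compacts the frame array so only XDP_PASS frames survive (redirects are bulked separately, everything else is dropped). A minimal sketch of that in-place filter, with a stub verdict function standing in for bpf_prog_run_xdp() (all names assumed):

    #include <stddef.h>

    enum verdict { DROP, PASS };

    /* Stub for bpf_prog_run_xdp(); any per-frame policy fits here. */
    static enum verdict run_prog(void *frame)
    {
        return frame ? PASS : DROP;
    }

    /* Keep only passed frames, preserving order; return the new count. */
    static size_t filter_batch(void **frames, size_t n)
    {
        size_t kept = 0;

        for (size_t i = 0; i < n; i++)
            if (run_prog(frames[i]) == PASS)
                frames[kept++] = frames[i];
        return kept;
    }
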
301 struct bpf_cpu_map_entry *rcpu = data; in cpu_map_kthread_run() local
310 while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
319 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
322 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
337 n = __ptr_ring_consume_batched(rcpu->queue, frames, in cpu_map_kthread_run()
351 nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats); in cpu_map_kthread_run()
379 trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats); in cpu_map_kthread_run()
385 put_cpu_map_entry(rcpu); in cpu_map_kthread_run()
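
The kthread loop above encodes two details worth noting: the thread exits only once a stop has been requested *and* the ring is fully drained, and it re-checks emptiness after preparing to sleep so a concurrent producer's wakeup is never lost. A compressed userspace sketch of that control flow (the queue and scheduling primitives are stand-ins, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <sched.h>
    #include <stddef.h>

    #define BATCH 8

    static atomic_bool stop_requested;
    static _Atomic size_t queued;   /* stand-in for the ptr_ring fill level */

    static size_t consume_batched(void **frames, size_t max)
    {
        size_t n = 0;

        while (n < max && atomic_load(&queued)) {
            atomic_fetch_sub(&queued, 1);
            frames[n++] = NULL;     /* placeholder frame */
        }
        return n;
    }

    static void worker(void)
    {
        void *frames[BATCH];

        /* while (!kthread_should_stop() || !__ptr_ring_empty(queue)) */
        while (!atomic_load(&stop_requested) || atomic_load(&queued)) {
            if (!atomic_load(&queued)) {
                /* The kernel marks itself TASK_INTERRUPTIBLE, then
                 * re-checks emptiness before scheduling away, so a
                 * wakeup arriving in between is not missed. */
                sched_yield();
                continue;
            }
            size_t n = consume_batched(frames, BATCH);
            (void)n;    /* run the XDP prog over the batch, pass frames up */
        }
    }
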
395 static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd) in __cpu_map_load_bpf_program() argument
408 rcpu->value.bpf_prog.id = prog->aux->id; in __cpu_map_load_bpf_program()
409 rcpu->prog = prog; in __cpu_map_load_bpf_program()
419 struct bpf_cpu_map_entry *rcpu; in __cpu_map_entry_alloc() local
425 rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa); in __cpu_map_entry_alloc()
426 if (!rcpu) in __cpu_map_entry_alloc()
430 rcpu->bulkq = __alloc_percpu_gfp(sizeof(*rcpu->bulkq), in __cpu_map_entry_alloc()
432 if (!rcpu->bulkq) in __cpu_map_entry_alloc()
436 bq = per_cpu_ptr(rcpu->bulkq, i); in __cpu_map_entry_alloc()
437 bq->obj = rcpu; in __cpu_map_entry_alloc()
441 rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa); in __cpu_map_entry_alloc()
442 if (!rcpu->queue) in __cpu_map_entry_alloc()
445 err = ptr_ring_init(rcpu->queue, value->qsize, gfp); in __cpu_map_entry_alloc()
449 rcpu->cpu = cpu; in __cpu_map_entry_alloc()
450 rcpu->map_id = map_id; in __cpu_map_entry_alloc()
451 rcpu->value.qsize = value->qsize; in __cpu_map_entry_alloc()
453 if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd)) in __cpu_map_entry_alloc()
457 rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, in __cpu_map_entry_alloc()
459 if (IS_ERR(rcpu->kthread)) in __cpu_map_entry_alloc()
462 get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */ in __cpu_map_entry_alloc()
463 get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */ in __cpu_map_entry_alloc()
466 kthread_bind(rcpu->kthread, cpu); in __cpu_map_entry_alloc()
467 wake_up_process(rcpu->kthread); in __cpu_map_entry_alloc()
469 return rcpu; in __cpu_map_entry_alloc()
472 if (rcpu->prog) in __cpu_map_entry_alloc()
473 bpf_prog_put(rcpu->prog); in __cpu_map_entry_alloc()
475 ptr_ring_cleanup(rcpu->queue, NULL); in __cpu_map_entry_alloc()
477 kfree(rcpu->queue); in __cpu_map_entry_alloc()
479 free_percpu(rcpu->bulkq); in __cpu_map_entry_alloc()
481 kfree(rcpu); in __cpu_map_entry_alloc()
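
__cpu_map_entry_alloc() is a textbook kernel constructor: a NUMA-local zeroed allocation per step, and on failure a goto into the cleanup ladder at lines 472-481 that unwinds exactly the steps completed so far, in reverse order of construction. Note also the two get_cpu_map_entry() calls: one reference for the map slot, one for the kthread. A hedged userspace sketch of the goto ladder (names illustrative):

    #include <stdlib.h>

    struct entry {
        void **ring;
        void  *queue;
    };

    static struct entry *entry_alloc(size_t qsize)
    {
        struct entry *e = calloc(1, sizeof(*e));

        if (!e)
            return NULL;

        e->queue = calloc(1, 64);       /* stand-in for rcpu->queue */
        if (!e->queue)
            goto free_entry;

        e->ring = calloc(qsize, sizeof(*e->ring)); /* ptr_ring_init() stand-in */
        if (!e->ring)
            goto free_queue;

        return e;

        /* Unwind in reverse order of construction. */
    free_queue:
        free(e->queue);
    free_entry:
        free(e);
        return NULL;
    }
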
487 struct bpf_cpu_map_entry *rcpu; in __cpu_map_entry_free() local
494 rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu); in __cpu_map_entry_free()
496 free_percpu(rcpu->bulkq); in __cpu_map_entry_free()
498 put_cpu_map_entry(rcpu); in __cpu_map_entry_free()
521 u32 key_cpu, struct bpf_cpu_map_entry *rcpu) in __cpu_map_entry_replace() argument
525 old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu); in __cpu_map_entry_replace()
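
__cpu_map_entry_replace() publishes the new entry with an atomic xchg on the per-CPU slot, so concurrent lookups observe either the old or the new pointer, never a torn one; the displaced entry is then freed only after an RCU grace period (__cpu_map_entry_free() above). A sketch of both sides, the exchanging writer and the single-load reader, with C11 atomics approximating xchg()/READ_ONCE() (stand-in types):

    #include <stdatomic.h>
    #include <stddef.h>

    struct entry;   /* opaque here */

    /* One slot per possible CPU, as in cmap->cpu_map[]. */
    static _Atomic(struct entry *) cpu_map[64];

    static struct entry *slot_replace(unsigned int cpu, struct entry *e)
    {
        /* old = xchg(&cmap->cpu_map[key_cpu], rcpu); the caller must
         * not free the returned pointer until no reader can still
         * hold it (RCU in the kernel). */
        return atomic_exchange(&cpu_map[cpu], e);
    }

    static struct entry *slot_lookup(unsigned int cpu)
    {
        /* READ_ONCE(): one untorn load the compiler cannot refetch. */
        return atomic_load_explicit(&cpu_map[cpu], memory_order_relaxed);
    }
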
551 struct bpf_cpu_map_entry *rcpu; in cpu_map_update_elem() local
571 rcpu = NULL; /* Same as deleting */ in cpu_map_update_elem()
574 rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id); in cpu_map_update_elem()
575 if (!rcpu) in cpu_map_update_elem()
577 rcpu->cmap = cmap; in cpu_map_update_elem()
580 __cpu_map_entry_replace(cmap, key_cpu, rcpu); in cpu_map_update_elem()
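
cpu_map_update_elem() never updates an entry in place: a qsize of 0 maps to rcpu = NULL ("same as deleting"), and any other value allocates a fresh entry that is swapped in over the old one. A simplified single-threaded sketch of that dispatch (helper names assumed; the kernel publishes via the xchg shown above and defers the free to RCU):

    #include <stddef.h>
    #include <stdlib.h>

    struct entry { size_t qsize; };

    static struct entry *entry_alloc(size_t qsize)  /* ctor stub */
    {
        struct entry *e = calloc(1, sizeof(*e));

        if (e)
            e->qsize = qsize;
        return e;
    }

    static struct entry *slot;  /* one slot; the real map has one per CPU */

    static int update_elem(size_t qsize)
    {
        struct entry *e = NULL;         /* qsize == 0: same as deleting */

        if (qsize) {
            e = entry_alloc(qsize);
            if (!e)
                return -1;              /* kernel: -ENOMEM */
        }
        struct entry *old = slot;       /* kernel: xchg + RCU-deferred free */
        slot = e;
        free(old);
        return 0;
    }
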
606 struct bpf_cpu_map_entry *rcpu; in cpu_map_free() local
608 rcpu = READ_ONCE(cmap->cpu_map[i]); in cpu_map_free()
609 if (!rcpu) in cpu_map_free()
622 struct bpf_cpu_map_entry *rcpu; in __cpu_map_lookup_elem() local
627 rcpu = READ_ONCE(cmap->cpu_map[key]); in __cpu_map_lookup_elem()
628 return rcpu; in __cpu_map_lookup_elem()
633 struct bpf_cpu_map_entry *rcpu = in cpu_map_lookup_elem() local
636 return rcpu ? &rcpu->value : NULL; in cpu_map_lookup_elem()
672 struct bpf_cpu_map_entry *rcpu = bq->obj; in bq_flush_to_queue() local
674 const int to_cpu = rcpu->cpu; in bq_flush_to_queue()
681 q = rcpu->queue; in bq_flush_to_queue()
701 trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu); in bq_flush_to_queue()
707 static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) in bq_enqueue() argument
710 struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); in bq_enqueue()
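
bq_enqueue() is the staging half of the producer path: each CPU appends frames to its private bulk queue and only touches the shared ptr_ring when that buffer fills (bq_flush_to_queue() above; the kernel also flushes at the end of the NAPI cycle), amortizing the cross-CPU synchronization cost over the batch. A minimal sketch of the batching idiom (sizes and names assumed):

    #include <stddef.h>

    #define BULK 8

    struct bulk_queue {
        void  *frames[BULK];
        size_t count;
    };

    /* Stand-in for bq_flush_to_queue(): push the whole batch to the
     * shared ring, count drops, fire the enqueue tracepoint. */
    static void flush(struct bulk_queue *bq)
    {
        bq->count = 0;
    }

    static void enqueue(struct bulk_queue *bq, void *frame)
    {
        if (bq->count == BULK)
            flush(bq);          /* one shared-ring touch per BULK frames */
        bq->frames[bq->count++] = frame;
    }
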
730 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, in cpu_map_enqueue() argument
742 bq_enqueue(rcpu, xdpf); in cpu_map_enqueue()