Lines Matching refs:rcpu (kernel/bpf/cpumap.c)

71 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
141 static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) in get_cpu_map_entry() argument
143 atomic_inc(&rcpu->refcnt); in get_cpu_map_entry()
149 struct bpf_cpu_map_entry *rcpu; in cpu_map_kthread_stop() local
151 rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq); in cpu_map_kthread_stop()
159 kthread_stop(rcpu->kthread); in cpu_map_kthread_stop()
162 static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, in cpu_map_build_skb() argument
225 static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) in put_cpu_map_entry() argument
227 if (atomic_dec_and_test(&rcpu->refcnt)) { in put_cpu_map_entry()
229 __cpu_map_ring_cleanup(rcpu->queue); in put_cpu_map_entry()
230 ptr_ring_cleanup(rcpu->queue, NULL); in put_cpu_map_entry()
231 kfree(rcpu->queue); in put_cpu_map_entry()
232 kfree(rcpu); in put_cpu_map_entry()
238 struct bpf_cpu_map_entry *rcpu = data; in cpu_map_kthread_run() local
247 while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
252 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
255 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
272 while ((xdpf = __ptr_ring_consume(rcpu->queue))) { in cpu_map_kthread_run()
276 skb = cpu_map_build_skb(rcpu, xdpf); in cpu_map_kthread_run()
292 trace_xdp_cpumap_kthread(rcpu->map_id, processed, drops, sched); in cpu_map_kthread_run()
298 put_cpu_map_entry(rcpu); in cpu_map_kthread_run()
306 struct bpf_cpu_map_entry *rcpu; in __cpu_map_entry_alloc() local
312 rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa); in __cpu_map_entry_alloc()
313 if (!rcpu) in __cpu_map_entry_alloc()
317 rcpu->bulkq = __alloc_percpu_gfp(sizeof(*rcpu->bulkq), in __cpu_map_entry_alloc()
319 if (!rcpu->bulkq) in __cpu_map_entry_alloc()
323 rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa); in __cpu_map_entry_alloc()
324 if (!rcpu->queue) in __cpu_map_entry_alloc()
327 err = ptr_ring_init(rcpu->queue, qsize, gfp); in __cpu_map_entry_alloc()
331 rcpu->cpu = cpu; in __cpu_map_entry_alloc()
332 rcpu->map_id = map_id; in __cpu_map_entry_alloc()
333 rcpu->qsize = qsize; in __cpu_map_entry_alloc()
336 rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, in __cpu_map_entry_alloc()
338 if (IS_ERR(rcpu->kthread)) in __cpu_map_entry_alloc()
341 get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */ in __cpu_map_entry_alloc()
342 get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */ in __cpu_map_entry_alloc()
345 kthread_bind(rcpu->kthread, cpu); in __cpu_map_entry_alloc()
346 wake_up_process(rcpu->kthread); in __cpu_map_entry_alloc()
348 return rcpu; in __cpu_map_entry_alloc()
351 ptr_ring_cleanup(rcpu->queue, NULL); in __cpu_map_entry_alloc()
353 kfree(rcpu->queue); in __cpu_map_entry_alloc()
355 free_percpu(rcpu->bulkq); in __cpu_map_entry_alloc()
357 kfree(rcpu); in __cpu_map_entry_alloc()
363 struct bpf_cpu_map_entry *rcpu; in __cpu_map_entry_free() local
371 rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu); in __cpu_map_entry_free()
375 struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu); in __cpu_map_entry_free()
378 bq_flush_to_queue(rcpu, bq, false); in __cpu_map_entry_free()
380 free_percpu(rcpu->bulkq); in __cpu_map_entry_free()
382 put_cpu_map_entry(rcpu); in __cpu_map_entry_free()
405 u32 key_cpu, struct bpf_cpu_map_entry *rcpu) in __cpu_map_entry_replace() argument
409 old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu); in __cpu_map_entry_replace()
434 struct bpf_cpu_map_entry *rcpu; in cpu_map_update_elem() local
455 rcpu = NULL; /* Same as deleting */ in cpu_map_update_elem()
458 rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id); in cpu_map_update_elem()
459 if (!rcpu) in cpu_map_update_elem()
463 __cpu_map_entry_replace(cmap, key_cpu, rcpu); in cpu_map_update_elem()
502 struct bpf_cpu_map_entry *rcpu; in cpu_map_free() local
504 rcpu = READ_ONCE(cmap->cpu_map[i]); in cpu_map_free()
505 if (!rcpu) in cpu_map_free()
519 struct bpf_cpu_map_entry *rcpu; in __cpu_map_lookup_elem() local
524 rcpu = READ_ONCE(cmap->cpu_map[key]); in __cpu_map_lookup_elem()
525 return rcpu; in __cpu_map_lookup_elem()
530 struct bpf_cpu_map_entry *rcpu = in cpu_map_lookup_elem() local
533 return rcpu ? &rcpu->qsize : NULL; in cpu_map_lookup_elem()
563 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, in bq_flush_to_queue() argument
567 const int to_cpu = rcpu->cpu; in bq_flush_to_queue()
574 q = rcpu->queue; in bq_flush_to_queue()
595 trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu); in bq_flush_to_queue()
602 static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) in bq_enqueue() argument
604 struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); in bq_enqueue()
607 bq_flush_to_queue(rcpu, bq, true); in bq_enqueue()
622 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, in cpu_map_enqueue() argument
634 bq_enqueue(rcpu, xdpf); in cpu_map_enqueue()
657 struct bpf_cpu_map_entry *rcpu = READ_ONCE(cmap->cpu_map[bit]); in __cpu_map_flush() local
663 if (unlikely(!rcpu)) in __cpu_map_flush()
669 bq = this_cpu_ptr(rcpu->bulkq); in __cpu_map_flush()
670 bq_flush_to_queue(rcpu, bq, true); in __cpu_map_flush()
673 wake_up_process(rcpu->kthread); in __cpu_map_flush()
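
Taken together, the references above trace the bpf_cpu_map_entry lifetime: __cpu_map_entry_alloc() takes two references, one for the cmap->cpu_map[] slot and one for the kthread; cpu_map_update_elem() and __cpu_map_entry_replace() swap the slot pointer; __cpu_map_entry_free() flushes the per-CPU bulk queues and drops the slot's reference; and the kthread drops the last reference via put_cpu_map_entry(), which tears down rcpu->queue. The sketch below is a minimal userspace model of that two-reference scheme, not kernel code; the struct and helper names are invented for illustration only.

/*
 * Userspace model (assumption: stand-in names, plain C11 atomics instead of
 * the kernel's atomic_t) of the refcount pattern seen in get_cpu_map_entry()
 * and put_cpu_map_entry() above.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cpu_map_entry_model {
	atomic_int refcnt;
	int cpu;
};

static void get_entry(struct cpu_map_entry_model *e)
{
	atomic_fetch_add(&e->refcnt, 1);	/* mirrors atomic_inc(&rcpu->refcnt) */
}

static void put_entry(struct cpu_map_entry_model *e)
{
	/* Free only when the last reference is dropped (atomic_dec_and_test). */
	if (atomic_fetch_sub(&e->refcnt, 1) == 1) {
		printf("cpu %d: last reference dropped, freeing entry\n", e->cpu);
		free(e);
	}
}

int main(void)
{
	struct cpu_map_entry_model *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	atomic_init(&e->refcnt, 0);
	e->cpu = 2;

	get_entry(e);	/* 1-refcnt for being in cmap->cpu_map[] */
	get_entry(e);	/* 1-refcnt for kthread */

	put_entry(e);	/* entry replaced or deleted: map slot reference dropped */
	put_entry(e);	/* kthread stops: final reference dropped, entry freed */
	return 0;
}

On the datapath side, the same listing shows cpu_map_enqueue() turning the xdp_buff into an xdp_frame and handing it to bq_enqueue(), which spills into rcpu->queue through bq_flush_to_queue() once the per-CPU bulk queue fills; __cpu_map_flush() flushes the remainder and wakes rcpu->kthread, which consumes the ring and builds skbs in cpu_map_build_skb().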