Lines matching +full:pre +full:- +full:programs in kernel/bpf/cpumap.c
1 // SPDX-License-Identifier: GPL-2.0-only
12 * CPU will do SKB-allocation and call the normal network stack.
17 * basically allows for 10G wirespeed pre-filtering via bpf.
36 * will be stored/queued for at most one driver ->poll() call. It is
42 #define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
58 /* XDP can run multiple RX-ring queues, need __percpu enqueue store */
63 /* Queue with potential multi-producers, and single-consumer kthread */
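Before the allocation and kthread details below, a minimal sketch of the producer side this map type serves. It is hedged: the map name cpu_map, the max_entries value, and the RX-queue based CPU selection are illustrative, not taken from this file. A driver-attached XDP program redirects raw frames into the cpumap; the per-entry kthread implemented below then builds SKBs on the remote CPU.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(struct bpf_cpumap_val));
	__uint(max_entries, 16);
} cpu_map SEC(".maps");

SEC("xdp")
int xdp_prefilter(struct xdp_md *ctx)
{
	__u32 cpu = ctx->rx_queue_index % 16;	/* illustrative CPU spreading */

	/* The low bits of the flags argument act as the fallback action
	 * when the lookup fails (e.g. the chosen entry was never enabled).
	 */
	return bpf_redirect_map(&cpu_map, cpu, XDP_PASS);
}

char _license[] SEC("license") = "GPL";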
86 u32 value_size = attr->value_size; in cpu_map_alloc()
88 int err = -ENOMEM; in cpu_map_alloc()
91 return ERR_PTR(-EPERM); in cpu_map_alloc()
94 if (attr->max_entries == 0 || attr->key_size != 4 || in cpu_map_alloc()
97 attr->map_flags & ~BPF_F_NUMA_NODE) in cpu_map_alloc()
98 return ERR_PTR(-EINVAL); in cpu_map_alloc()
102 return ERR_PTR(-ENOMEM); in cpu_map_alloc()
104 bpf_map_init_from_attr(&cmap->map, attr); in cpu_map_alloc()
106 /* Pre-limit array size based on NR_CPUS, not final CPU check */ in cpu_map_alloc()
107 if (cmap->map.max_entries > NR_CPUS) { in cpu_map_alloc()
108 err = -E2BIG; in cpu_map_alloc()
113 cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries * in cpu_map_alloc()
115 cmap->map.numa_node); in cpu_map_alloc()
116 if (!cmap->cpu_map) in cpu_map_alloc()
119 return &cmap->map; in cpu_map_alloc()
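From userspace, a map that passes the checks in cpu_map_alloc() can be created with libbpf. A hedged sketch, assuming libbpf >= 0.7 for bpf_map_create() and an illustrative map name; the caller must be privileged, matching the -EPERM check above:

#include <linux/bpf.h>
#include <bpf/bpf.h>

int create_cpumap(unsigned int nr_entries)
{
	/* key: the CPU number (4 bytes); value: struct bpf_cpumap_val,
	 * i.e. the queue size plus an optional per-entry program fd.
	 */
	return bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpu_map",
			      sizeof(__u32), sizeof(struct bpf_cpumap_val),
			      nr_entries, NULL);
}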
127 atomic_inc(&rcpu->refcnt); in get_cpu_map_entry()
138 * as it waits until all in-flight call_rcu() callbacks complete. in cpu_map_kthread_stop()
143 kthread_stop(rcpu->kthread); in cpu_map_kthread_stop()
148 /* The tear-down procedure should have made sure that the queue is in __cpu_map_ring_cleanup()
149 * empty. See __cpu_map_entry_replace() and work-queue in __cpu_map_ring_cleanup()
162 if (atomic_dec_and_test(&rcpu->refcnt)) { in put_cpu_map_entry()
163 if (rcpu->prog) in put_cpu_map_entry()
164 bpf_prog_put(rcpu->prog); in put_cpu_map_entry()
166 __cpu_map_ring_cleanup(rcpu->queue); in put_cpu_map_entry()
167 ptr_ring_cleanup(rcpu->queue, NULL); in put_cpu_map_entry()
168 kfree(rcpu->queue); in put_cpu_map_entry()
183 act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog); in cpu_map_bpf_prog_run_skb()
189 err = xdp_do_generic_redirect(skb->dev, skb, &xdp, in cpu_map_bpf_prog_run_skb()
190 rcpu->prog); in cpu_map_bpf_prog_run_skb()
193 stats->drop++; in cpu_map_bpf_prog_run_skb()
195 stats->redirect++; in cpu_map_bpf_prog_run_skb()
199 bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act); in cpu_map_bpf_prog_run_skb()
202 trace_xdp_exception(skb->dev, rcpu->prog, act); in cpu_map_bpf_prog_run_skb()
207 stats->drop++; in cpu_map_bpf_prog_run_skb()
229 rxq.dev = xdpf->dev_rx; in cpu_map_bpf_prog_run_xdp()
230 rxq.mem = xdpf->mem; in cpu_map_bpf_prog_run_xdp()
235 act = bpf_prog_run_xdp(rcpu->prog, &xdp); in cpu_map_bpf_prog_run_xdp()
241 stats->drop++; in cpu_map_bpf_prog_run_xdp()
244 stats->pass++; in cpu_map_bpf_prog_run_xdp()
248 err = xdp_do_redirect(xdpf->dev_rx, &xdp, in cpu_map_bpf_prog_run_xdp()
249 rcpu->prog); in cpu_map_bpf_prog_run_xdp()
252 stats->drop++; in cpu_map_bpf_prog_run_xdp()
254 stats->redirect++; in cpu_map_bpf_prog_run_xdp()
258 bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act); in cpu_map_bpf_prog_run_xdp()
262 stats->drop++; in cpu_map_bpf_prog_run_xdp()
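cpu_map_bpf_prog_run_xdp() runs the optional per-entry program against every dequeued frame on the remote CPU. A hedged sketch of such a program; the "xdp/cpumap" section name is what recent libbpf uses to set expected_attach_type to BPF_XDP_CPUMAP, and the filter itself is purely illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp/cpumap")
int xdp_on_remote_cpu(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop frames shorter than an Ethernet header; XDP_PASS means
	 * "build an SKB on this CPU and hand it to the network stack".
	 */
	if (data + 14 > data_end)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";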
280 if (!rcpu->prog) in cpu_map_bpf_prog_run()
287 if (stats->redirect) in cpu_map_bpf_prog_run()
306 * from the map, thus no new packets can enter. Remaining in-flight in cpu_map_kthread_run()
310 while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
320 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
322 /* Recheck to avoid lost wake-up */ in cpu_map_kthread_run()
323 if (__ptr_ring_empty(rcpu->queue)) { in cpu_map_kthread_run()
336 * consume side is valid, as resizing of the queue is not allowed. in cpu_map_kthread_run()
338 n = __ptr_ring_consume_batched(rcpu->queue, frames, in cpu_map_kthread_run()
348 list_add_tail(&skb->list, &list); in cpu_map_kthread_run()
379 xdpf->dev_rx); in cpu_map_kthread_run()
385 list_add_tail(&skb->list, &list); in cpu_map_kthread_run()
390 trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops, in cpu_map_kthread_run()
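The sleep/wake logic in cpu_map_kthread_run() is the classic "recheck after setting the task state" pattern that avoids a lost wake-up. A stripped-down, hedged sketch with illustrative names; producers must call wake_up_process() on this task after enqueueing, as bq_flush_to_queue() and __cpu_map_flush() do:

#include <linux/kthread.h>
#include <linux/ptr_ring.h>
#include <linux/sched.h>

static int consumer_kthread(void *arg)
{
	struct ptr_ring *ring = arg;	/* single consumer by construction */

	while (!kthread_should_stop() || !__ptr_ring_empty(ring)) {
		if (__ptr_ring_empty(ring)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck after the state change so a concurrent
			 * wake_up_process() cannot be lost.
			 */
			if (__ptr_ring_empty(ring))
				schedule();
			else
				__set_current_state(TASK_RUNNING);
		}
		/* ... consume a batch, e.g. __ptr_ring_consume_batched() ... */
	}
	return 0;
}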
410 if (prog->expected_attach_type != BPF_XDP_CPUMAP || in __cpu_map_load_bpf_program()
413 return -EINVAL; in __cpu_map_load_bpf_program()
416 rcpu->value.bpf_prog.id = prog->aux->id; in __cpu_map_load_bpf_program()
417 rcpu->prog = prog; in __cpu_map_load_bpf_program()
426 int numa, err, i, fd = value->bpf_prog.fd; in __cpu_map_entry_alloc()
431 /* Have map->numa_node, but choose node of redirect target CPU */ in __cpu_map_entry_alloc()
439 rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq), in __cpu_map_entry_alloc()
441 if (!rcpu->bulkq) in __cpu_map_entry_alloc()
445 bq = per_cpu_ptr(rcpu->bulkq, i); in __cpu_map_entry_alloc()
446 bq->obj = rcpu; in __cpu_map_entry_alloc()
450 rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp, in __cpu_map_entry_alloc()
452 if (!rcpu->queue) in __cpu_map_entry_alloc()
455 err = ptr_ring_init(rcpu->queue, value->qsize, gfp); in __cpu_map_entry_alloc()
459 rcpu->cpu = cpu; in __cpu_map_entry_alloc()
460 rcpu->map_id = map->id; in __cpu_map_entry_alloc()
461 rcpu->value.qsize = value->qsize; in __cpu_map_entry_alloc()
467 rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, in __cpu_map_entry_alloc()
469 map->id); in __cpu_map_entry_alloc()
470 if (IS_ERR(rcpu->kthread)) in __cpu_map_entry_alloc()
473 get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */ in __cpu_map_entry_alloc()
474 get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */ in __cpu_map_entry_alloc()
477 kthread_bind(rcpu->kthread, cpu); in __cpu_map_entry_alloc()
478 wake_up_process(rcpu->kthread); in __cpu_map_entry_alloc()
483 if (rcpu->prog) in __cpu_map_entry_alloc()
484 bpf_prog_put(rcpu->prog); in __cpu_map_entry_alloc()
486 ptr_ring_cleanup(rcpu->queue, NULL); in __cpu_map_entry_alloc()
488 kfree(rcpu->queue); in __cpu_map_entry_alloc()
490 free_percpu(rcpu->bulkq); in __cpu_map_entry_alloc()
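The kthread setup at the end of __cpu_map_entry_alloc() is the usual create-on-node, bind, then wake sequence. A hedged, generic sketch; the helper and thread name are illustrative:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/topology.h>

static struct task_struct *start_pinned_worker(int (*fn)(void *), void *arg,
					       int cpu)
{
	struct task_struct *t;

	/* Allocate the thread on the target CPU's NUMA node */
	t = kthread_create_on_node(fn, arg, cpu_to_node(cpu),
				   "cpu_worker/%d", cpu);
	if (IS_ERR(t))
		return t;

	kthread_bind(t, cpu);	/* pin it before the first wakeup */
	wake_up_process(t);	/* let it enter its run loop */
	return t;
}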
501 * RCU grace-period have elapsed. Thus, XDP cannot queue any in __cpu_map_entry_free()
507 free_percpu(rcpu->bulkq); in __cpu_map_entry_free()
516 * atomic xchg and NULL-ptr check in __cpu_map_flush() make sure a
536 old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu))); in __cpu_map_entry_replace()
538 call_rcu(&old_rcpu->rcu, __cpu_map_entry_free); in __cpu_map_entry_replace()
539 INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop); in __cpu_map_entry_replace()
540 schedule_work(&old_rcpu->kthread_stop_wq); in __cpu_map_entry_replace()
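__cpu_map_entry_replace() is an instance of the RCU publish-then-retire pattern: publish the new entry with an atomic xchg, retire the old one only after a grace period so concurrent RCU readers on the XDP enqueue path never see freed memory. A hedged, self-contained sketch with illustrative names; the real code additionally schedules a work item that waits for pending RCU callbacks and then stops the entry's kthread:

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {			/* illustrative stand-in for bpf_cpu_map_entry */
	struct rcu_head rcu;
	/* ... per-entry state ... */
};

static void my_entry_free_rcu(struct rcu_head *head)
{
	/* Runs after the grace period: no RCU reader still holds the entry */
	kfree(container_of(head, struct my_entry, rcu));
}

static void my_entry_replace(struct my_entry __rcu **slot, struct my_entry *new)
{
	struct my_entry *old;

	/* Publish atomically: readers see either the old or the new entry */
	old = unrcu_pointer(xchg(slot, RCU_INITIALIZER(new)));
	if (old)
		call_rcu(&old->rcu, my_entry_free_rcu);
}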
549 if (key_cpu >= map->max_entries) in cpu_map_delete_elem()
550 return -EINVAL; in cpu_map_delete_elem()
566 memcpy(&cpumap_value, value, map->value_size); in cpu_map_update_elem()
569 return -EINVAL; in cpu_map_update_elem()
570 if (unlikely(key_cpu >= cmap->map.max_entries)) in cpu_map_update_elem()
571 return -E2BIG; in cpu_map_update_elem()
573 return -EEXIST; in cpu_map_update_elem()
575 return -EOVERFLOW; in cpu_map_update_elem()
579 return -ENODEV; in cpu_map_update_elem()
584 /* Updating qsize causes re-allocation of bpf_cpu_map_entry */ in cpu_map_update_elem()
587 return -ENOMEM; in cpu_map_update_elem()
588 rcpu->cmap = cmap; in cpu_map_update_elem()
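From the syscall side an update supplies a struct bpf_cpumap_val matching the checks above. A hedged libbpf sketch; the function name is illustrative:

#include <linux/bpf.h>
#include <bpf/bpf.h>

int enable_cpu(int cpumap_fd, __u32 cpu, __u32 qsize, int prog_fd)
{
	struct bpf_cpumap_val val = {
		.qsize	     = qsize,	/* ptr_ring size; 0 behaves like delete */
		.bpf_prog.fd = prog_fd,	/* <= 0: no per-entry program */
	};

	return bpf_map_update_elem(cpumap_fd, &cpu, &val, 0);
}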
601 /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, in cpu_map_free()
602 * so the bpf programs (there can be more than one that used this map) were in cpu_map_free()
604 * these programs to complete. The rcu critical section only guarantees in cpu_map_free()
605 * no further "XDP/bpf-side" reads against bpf_cpu_map->cpu_map. in cpu_map_free()
615 for (i = 0; i < cmap->map.max_entries; i++) { in cpu_map_free()
618 rcpu = rcu_dereference_raw(cmap->cpu_map[i]); in cpu_map_free()
622 /* bq flush and cleanup happens after RCU grace-period */ in cpu_map_free()
625 bpf_map_area_free(cmap->cpu_map); in cpu_map_free()
638 if (key >= map->max_entries) in __cpu_map_lookup_elem()
641 rcpu = rcu_dereference_check(cmap->cpu_map[key], in __cpu_map_lookup_elem()
651 return rcpu ? &rcpu->value : NULL; in cpu_map_lookup_elem()
660 if (index >= cmap->map.max_entries) { in cpu_map_get_next_key()
665 if (index == cmap->map.max_entries - 1) in cpu_map_get_next_key()
666 return -ENOENT; in cpu_map_get_next_key()
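Together, lookup and get_next_key let userspace walk the map; the value read back is the struct bpf_cpumap_val, carrying qsize and the id of any attached program (set in __cpu_map_load_bpf_program()). A hedged libbpf sketch with illustrative names:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

void dump_cpumap(int cpumap_fd)
{
	struct bpf_cpumap_val val;
	__u32 cpu, next_cpu;
	void *key = NULL;		/* NULL key: start from the first entry */

	while (!bpf_map_get_next_key(cpumap_fd, key, &next_cpu)) {
		if (!bpf_map_lookup_elem(cpumap_fd, &next_cpu, &val))
			printf("cpu %u: qsize=%u prog_id=%u\n",
			       next_cpu, val.qsize, val.bpf_prog.id);
		cpu = next_cpu;
		key = &cpu;
	}
}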
693 struct bpf_cpu_map_entry *rcpu = bq->obj; in bq_flush_to_queue()
695 const int to_cpu = rcpu->cpu; in bq_flush_to_queue()
699 if (unlikely(!bq->count)) in bq_flush_to_queue()
702 q = rcpu->queue; in bq_flush_to_queue()
703 spin_lock(&q->producer_lock); in bq_flush_to_queue()
705 for (i = 0; i < bq->count; i++) { in bq_flush_to_queue()
706 struct xdp_frame *xdpf = bq->q[i]; in bq_flush_to_queue()
716 bq->count = 0; in bq_flush_to_queue()
717 spin_unlock(&q->producer_lock); in bq_flush_to_queue()
719 __list_del_clearprev(&bq->flush_node); in bq_flush_to_queue()
722 trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu); in bq_flush_to_queue()
725 /* Runs inside an RCU read-side critical section, plus in softirq under NAPI protection.
731 struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); in bq_enqueue()
733 if (unlikely(bq->count == CPU_MAP_BULK_SIZE)) in bq_enqueue()
738 * (e.g. ixgbe) recycle tricks based on page-refcnt. in bq_enqueue()
741 * with another CPU on page-refcnt and remaining driver code). in bq_enqueue()
743 * operation, when completing napi->poll call. in bq_enqueue()
745 bq->q[bq->count++] = xdpf; in bq_enqueue()
747 if (!bq->flush_node.prev) in bq_enqueue()
748 list_add(&bq->flush_node, flush_list); in bq_enqueue()
755 xdpf->dev_rx = dev_rx; in cpu_map_enqueue()
766 __skb_pull(skb, skb->mac_len); in cpu_map_generic_redirect()
770 ret = ptr_ring_produce(rcpu->queue, skb); in cpu_map_generic_redirect()
774 wake_up_process(rcpu->kthread); in cpu_map_generic_redirect()
776 trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu); in cpu_map_generic_redirect()
789 wake_up_process(bq->obj->kthread); in __cpu_map_flush()
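Taken together, bq_enqueue(), bq_flush_to_queue() and __cpu_map_flush() implement a stage-then-flush producer protocol: frames are staged in a small per-CPU array, pushed into the (locked) ptr_ring when the array fills or when the driver finishes its napi->poll(), and the consumer kthread is woken once per flush. A hedged, self-contained sketch with illustrative names; it tracks flush-list membership with list_empty()/list_del_init() (flush_node must be initialized with INIT_LIST_HEAD()) rather than the prev-pointer trick used here:

#include <linux/list.h>
#include <linux/ptr_ring.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#define BULK_SIZE 8			/* mirrors CPU_MAP_BULK_SIZE */

struct bulkq {				/* illustrative per-(CPU, entry) stage */
	void *frames[BULK_SIZE];
	unsigned int count;
	struct list_head flush_node;	/* linked onto a per-CPU flush list */
	struct ptr_ring *ring;		/* destination multi-producer queue */
	struct task_struct *consumer;	/* kthread draining the ring */
};

static void bulkq_flush(struct bulkq *bq)
{
	unsigned int i;

	spin_lock(&bq->ring->producer_lock);
	for (i = 0; i < bq->count; i++) {
		if (__ptr_ring_produce(bq->ring, bq->frames[i])) {
			/* Ring full: the frame is dropped; the real code
			 * frees it and records the drop in a tracepoint.
			 */
		}
	}
	bq->count = 0;
	spin_unlock(&bq->ring->producer_lock);
}

/* XDP enqueue path: stage the frame, flush on overflow, and make sure the
 * bulk queue is on this CPU's flush list.
 */
static void bulkq_enqueue(struct bulkq *bq, struct list_head *flush_list,
			  void *frame)
{
	if (bq->count == BULK_SIZE)
		bulkq_flush(bq);

	bq->frames[bq->count++] = frame;

	if (list_empty(&bq->flush_node))
		list_add(&bq->flush_node, flush_list);
}

/* Once per napi->poll(): push everything out and wake each consumer */
static void bulkq_flush_all(struct list_head *flush_list)
{
	struct bulkq *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bulkq_flush(bq);
		list_del_init(&bq->flush_node);
		wake_up_process(bq->consumer);
	}
}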