
Searched refs:flows (Results 1 – 25 of 80) sorted by relevance


/Linux-v5.15/include/net/
fq_impl.h
36 idx = flow - fq->flows; in __fq_adjust_removal()
152 flow = &fq->flows[idx]; in fq_flow_classify()
160 tin->flows++; in fq_flow_classify()
173 struct fq_flow *cur = &fq->flows[i]; in fq_find_fattest_flow()
357 fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); in fq_init()
358 if (!fq->flows) in fq_init()
364 kvfree(fq->flows); in fq_init()
365 fq->flows = NULL; in fq_init()
370 fq_flow_init(&fq->flows[i]); in fq_init()
381 fq_flow_reset(fq, &fq->flows[i], free_func); in fq_reset()
[all …]
fq.h
53 u32 flows; member
65 struct fq_flow *flows; member
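
The fq_impl.h/fq.h hits above show the common fq pattern: a flat table of per-flow structs sized by flows_cnt, allocated with kvcalloc(), indexed by a hash, and with the flow index recovered again by pointer arithmetic (idx = flow - fq->flows). Below is a minimal userspace sketch of that pattern only; the fq_demo/fq_flow_demo names are invented for illustration and plain calloc() stands in for kvcalloc().

/* Simplified userspace illustration of the flows-table pattern seen in
 * fq_impl.h/fq.h. Names and sizes are illustrative, not the kernel's. */
#include <stdio.h>
#include <stdlib.h>

struct fq_flow_demo {
	unsigned int backlog;		/* bytes queued on this flow */
};

struct fq_demo {
	struct fq_flow_demo *flows;	/* flat table, flows_cnt entries */
	unsigned int flows_cnt;
};

static int fq_demo_init(struct fq_demo *fq, unsigned int cnt)
{
	fq->flows_cnt = cnt;
	/* the kernel uses kvcalloc(); calloc() stands in here */
	fq->flows = calloc(cnt, sizeof(fq->flows[0]));
	return fq->flows ? 0 : -1;
}

int main(void)
{
	struct fq_demo fq;

	if (fq_demo_init(&fq, 1024))
		return 1;

	/* classify: hash picks a slot in the flat table */
	unsigned int hash = 0xdeadbeef;
	struct fq_flow_demo *flow = &fq.flows[hash % fq.flows_cnt];
	flow->backlog += 1500;

	/* recover the table index from the pointer, as __fq_adjust_removal() does */
	unsigned int idx = (unsigned int)(flow - fq.flows);
	printf("flow idx %u backlog %u\n", idx, flow->backlog);

	free(fq.flows);
	return 0;
}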
/Linux-v5.15/samples/bpf/
do_hbm_test.sh
78 flows=1
150 -f=*|--flows=*)
151 flows="${i#*=}"
278 while [ $flow_cnt -le $flows ] ; do
320 while [ $flow_cnt -le $flows ] ; do
346 iperf3 -c $host -p $port -i 0 -P $flows -f m -t $dur > iperf.$id
366 while [ $flow_cnt -le $flows ] ; do
386 while [ $flow_cnt -le $flows ] ; do
/Linux-v5.15/net/sched/
sch_fq_codel.c
53 struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ member
164 flow = &q->flows[idx]; in fq_codel_drop()
204 flow = &q->flows[idx]; in fq_codel_enqueue()
265 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); in dequeue_func()
343 struct fq_codel_flow *flow = q->flows + i; in fq_codel_reset()
383 if (q->flows) in fq_codel_change()
454 kvfree(q->flows); in fq_codel_destroy()
486 if (!q->flows) { in fq_codel_init()
487 q->flows = kvcalloc(q->flows_cnt, in fq_codel_init()
490 if (!q->flows) { in fq_codel_init()
[all …]
sch_fq_pie.c
57 struct fq_pie_flow *flows; member
149 sel_flow = &q->flows[idx]; in fq_pie_qdisc_enqueue()
301 if (q->flows) { in fq_pie_change()
383 pie_calculate_probability(&q->p_params, &q->flows[idx].vars, in fq_pie_timer()
384 q->flows[idx].backlog); in fq_pie_timer()
424 q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow), in fq_pie_init()
426 if (!q->flows) { in fq_pie_init()
431 struct fq_pie_flow *flow = q->flows + idx; in fq_pie_init()
515 struct fq_pie_flow *flow = q->flows + idx; in fq_pie_reset()
535 kvfree(q->flows); in fq_pie_destroy()
sch_atm.c
66 struct list_head flows; /* NB: "link" is also on this member
78 list_for_each_entry(flow, &p->flows, list) { in lookup_flow()
356 list_for_each_entry(flow, &p->flows, list) { in atm_tc_walk()
394 list_for_each_entry(flow, &p->flows, list) { in atm_tc_enqueue()
478 list_for_each_entry(flow, &p->flows, list) { in sch_atm_dequeue()
549 INIT_LIST_HEAD(&p->flows); in atm_tc_init()
551 list_add(&p->link.list, &p->flows); in atm_tc_init()
577 list_for_each_entry(flow, &p->flows, list) in atm_tc_reset()
588 list_for_each_entry(flow, &p->flows, list) { in atm_tc_destroy()
593 list_for_each_entry_safe(flow, tmp, &p->flows, list) { in atm_tc_destroy()
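
Unlike the hash-table qdiscs above, the sch_atm.c hits show flows kept on a linked list anchored in the qdisc private data and walked on every lookup (list_for_each_entry over &p->flows). A rough userspace sketch of that bookkeeping follows; a plain singly linked list stands in for the kernel's list_head machinery and all names are invented for illustration.

/* Minimal userspace sketch of list-based flow bookkeeping. */
#include <stdio.h>
#include <stdlib.h>

struct atm_flow_demo {
	int classid;
	struct atm_flow_demo *next;
};

struct atm_qdisc_demo {
	struct atm_flow_demo *flows;	/* head of the flow list */
};

static struct atm_flow_demo *lookup_flow(struct atm_qdisc_demo *p, int classid)
{
	/* every lookup walks the whole list, as lookup_flow() does */
	for (struct atm_flow_demo *f = p->flows; f; f = f->next)
		if (f->classid == classid)
			return f;
	return NULL;
}

static void add_flow(struct atm_qdisc_demo *p, int classid)
{
	struct atm_flow_demo *f = malloc(sizeof(*f));

	if (!f)
		return;
	f->classid = classid;
	f->next = p->flows;
	p->flows = f;
}

int main(void)
{
	struct atm_qdisc_demo p = { .flows = NULL };

	add_flow(&p, 1);
	add_flow(&p, 2);
	printf("flow 2 %sfound\n", lookup_flow(&p, 2) ? "" : "not ");

	while (p.flows) {		/* teardown, like atm_tc_destroy() */
		struct atm_flow_demo *f = p.flows;
		p.flows = f->next;
		free(f);
	}
	return 0;
}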
sch_fq.c
120 u32 flows; member
254 q->flows -= fcnt; in fq_gc()
304 if (q->flows >= (2U << q->fq_trees_log) && in fq_classify()
305 q->inactive_flows > q->flows/2) in fq_classify()
359 q->flows++; in fq_classify()
692 q->flows = 0; in fq_reset()
737 q->flows -= fcnt; in fq_rehash()
1023 st.flows = q->flows; in fq_dump_stats()
sch_cake.c
150 struct cake_flow flows[CAKE_QUEUES]; member
743 q->flows[reduced_hash].set)) { in cake_hash()
761 if (!q->flows[outer_hash + k].set) { in cake_hash()
776 if (!q->flows[outer_hash + k].set) { in cake_hash()
788 if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { in cake_hash()
789 q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; in cake_hash()
790 q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; in cake_hash()
817 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
819 q->flows[reduced_hash].srchost = srchost_idx; in cake_hash()
840 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
[all …]
/Linux-v5.15/drivers/crypto/allwinner/sun8i-ss/
sun8i-ss-core.c
71 ss->flows[flow].stat_req++; in sun8i_ss_run_task()
128 reinit_completion(&ss->flows[flow].complete); in sun8i_ss_run_task()
129 ss->flows[flow].status = 0; in sun8i_ss_run_task()
134 wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, in sun8i_ss_run_task()
136 if (ss->flows[flow].status == 0) { in sun8i_ss_run_task()
155 ss->flows[flow].status = 1; in ss_irq_handler()
156 complete(&ss->flows[flow].complete); in ss_irq_handler()
420 seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req); in sun8i_ss_debugfs_show()
455 crypto_engine_exit(ss->flows[i].engine); in sun8i_ss_free_flows()
467 ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow), in allocate_flows()
[all …]
sun8i-ss-prng.c
129 reinit_completion(&ss->flows[flow].complete); in sun8i_ss_prng_generate()
130 ss->flows[flow].status = 0; in sun8i_ss_prng_generate()
136 wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, in sun8i_ss_prng_generate()
138 if (ss->flows[flow].status == 0) { in sun8i_ss_prng_generate()
sun8i-ss-hash.c
208 ss->flows[flow].stat_req++; in sun8i_ss_run_hash_task()
241 reinit_completion(&ss->flows[flow].complete); in sun8i_ss_run_hash_task()
242 ss->flows[flow].status = 0; in sun8i_ss_run_hash_task()
247 wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, in sun8i_ss_run_hash_task()
249 if (ss->flows[flow].status == 0) { in sun8i_ss_run_hash_task()
310 engine = ss->flows[e].engine; in sun8i_ss_hash_digest()
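
The sun8i-ss hits repeat one handshake: the submitter resets the per-flow completion and status flag, starts the request, does a timed wait, and treats a still-zero status as a timeout (visible in sun8i_ss_run_task(), sun8i_ss_prng_generate() and sun8i_ss_run_hash_task(), with the IRQ handler setting status and completing the flow). Below is a userspace approximation of that handshake; a POSIX semaphore stands in for struct completion, and the names and the 2-second timeout are illustrative only.

/* Userspace analogue of the per-flow completion handshake. */
#include <stdio.h>
#include <time.h>
#include <pthread.h>
#include <semaphore.h>

struct flow_demo {
	sem_t complete;		/* signalled by the "irq" side */
	int status;		/* 0 = not done, 1 = done */
};

static struct flow_demo flow;

static void *irq_side(void *arg)
{
	(void)arg;
	/* pretend the hardware finished the request, like ss_irq_handler() */
	flow.status = 1;
	sem_post(&flow.complete);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec ts;

	sem_init(&flow.complete, 0, 0);
	flow.status = 0;			/* reset before submitting */

	pthread_create(&t, NULL, irq_side, NULL);

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 2;				/* bounded wait on the completion */
	sem_timedwait(&flow.complete, &ts);

	if (flow.status == 0)
		fprintf(stderr, "timeout: request never completed\n");
	else
		printf("request completed\n");

	pthread_join(t, NULL);
	sem_destroy(&flow.complete);
	return 0;
}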
/Linux-v5.15/drivers/dma/ti/
k3-udma-glue.c
83 struct k3_udma_glue_rx_flow *flows; member
640 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_release_rx_flow()
660 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_cfg_rx_flow()
956 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_rx_chn_priv()
957 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_rx_chn_priv()
958 if (!rx_chn->flows) { in k3_udma_glue_request_rx_chn_priv()
968 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_rx_chn_priv()
1037 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_remote_rx_chn()
1038 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_remote_rx_chn()
1039 if (!rx_chn->flows) { in k3_udma_glue_request_remote_rx_chn()
[all …]
/Linux-v5.15/drivers/infiniband/hw/hfi1/
tid_rdma.c
757 u32 generation = rcd->flows[flow_idx].generation; in kern_setup_hw_flow()
775 rcd->flows[flow_idx].generation = in kern_clear_hw_flow()
776 kern_flow_generation_next(rcd->flows[flow_idx].generation); in kern_clear_hw_flow()
804 rcd->flows[fs->index].generation = fs->generation; in hfi1_kern_setup_hw_flow()
853 rcd->flows[i].generation = mask_generation(prandom_u32()); in hfi1_kern_init_ctxt_generations()
1465 struct tid_rdma_flow *flow = &req->flows[req->setup_head]; in hfi1_kern_exp_rcv_setup()
1556 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_kern_exp_rcv_clear()
1612 kfree(req->flows); in hfi1_kern_exp_rcv_free_flows()
1613 req->flows = NULL; in hfi1_kern_exp_rcv_free_flows()
1634 struct tid_rdma_flow *flows; in hfi1_kern_exp_rcv_alloc_flows() local
[all …]
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/
eswitch_offloads.c
986 struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules; in mlx5_eswitch_del_send_to_vport_meta_rules() local
989 if (!num_vfs || !flows) in mlx5_eswitch_del_send_to_vport_meta_rules()
993 mlx5_del_flow_rules(flows[i]); in mlx5_eswitch_del_send_to_vport_meta_rules()
995 kvfree(flows); in mlx5_eswitch_del_send_to_vport_meta_rules()
1005 struct mlx5_flow_handle **flows; in mlx5_eswitch_add_send_to_vport_meta_rules() local
1012 flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL); in mlx5_eswitch_add_send_to_vport_meta_rules()
1013 if (!flows) in mlx5_eswitch_add_send_to_vport_meta_rules()
1047 flows[rule_idx++] = flow_rule; in mlx5_eswitch_add_send_to_vport_meta_rules()
1050 esw->fdb_table.offloads.send_to_vport_meta_rules = flows; in mlx5_eswitch_add_send_to_vport_meta_rules()
1056 mlx5_del_flow_rules(flows[rule_idx]); in mlx5_eswitch_add_send_to_vport_meta_rules()
[all …]
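
The eswitch_offloads.c hits show the usual array-of-handles pattern: allocate an array of per-VF rule pointers with kvzalloc(), install rules one by one, store the array on success, and on failure delete only the rules installed so far. A simplified userspace sketch follows; malloc/free replace the kernel allocators, and add_rule()/del_rule() are hypothetical stand-ins for the flow-rule add/delete calls.

/* Userspace sketch: build an array of handles with partial-failure unwind. */
#include <stdio.h>
#include <stdlib.h>

struct rule { int id; };

static struct rule *add_rule(int vf)
{
	struct rule *r = malloc(sizeof(*r));

	if (r)
		r->id = vf;
	return r;
}

static void del_rule(struct rule *r)
{
	free(r);
}

static struct rule **install_meta_rules(int num_vfs)
{
	struct rule **flows = calloc(num_vfs, sizeof(*flows));
	int i;

	if (!flows)
		return NULL;

	for (i = 0; i < num_vfs; i++) {
		flows[i] = add_rule(i);
		if (!flows[i])
			goto err;
	}
	return flows;			/* caller stores the array */

err:
	/* unwind only the rules installed before the failure */
	while (--i >= 0)
		del_rule(flows[i]);
	free(flows);
	return NULL;
}

int main(void)
{
	struct rule **flows = install_meta_rules(4);

	printf("install %s\n", flows ? "ok" : "failed");
	if (flows) {
		for (int i = 0; i < 4; i++)
			del_rule(flows[i]);
		free(flows);
	}
	return 0;
}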
en_rep.h
173 struct list_head flows; member
191 struct list_head flows; member
/Linux-v5.15/Documentation/userspace-api/media/mediactl/
media-controller-model.rst
26 by an entity flows from the entity's output to one or more entity
31 pads, either on the same entity or on different entities. Data flows
/Linux-v5.15/Documentation/networking/
nf_flowtable.rst
33 specifies what flows are placed into the flowtable. Hence, packets follow the
34 classic IP forwarding path unless the user explicitly instruct flows to use this
111 You can identify offloaded flows through the [OFFLOAD] tag when listing your
130 instead the real device is sufficient for the flowtable to track your flows.
198 There is a workqueue that adds the flows to the hardware. Note that a few
202 You can identify hardware offloaded flows through the [HW_OFFLOAD] tag when
scaling.rst
31 of logical flows. Packets for each flow are steered to a separate receive
188 to the same CPU is CPU load imbalance if flows vary in packet rate.
194 Flow Limit is an optional RPS feature that prioritizes small flows
195 during CPU contention by dropping packets from large flows slightly
196 ahead of those from small flows. It is active only when an RPS or RFS
202 new packet is dropped. Packets from other flows are still only
206 even large flows maintain connectivity.
224 identification of large flows and fewer false positives. The default
261 flows to the CPUs where those flows are being processed. The flow hash
266 same CPU. Indeed, with many flows and few CPUs, it is very likely that
[all …]
openvswitch.rst
16 table" that userspace populates with "flows" that map from keys based
104 A wildcarded flow can represent a group of exact match flows. Each '1' bit
108 by reduce the number of new flows need to be processed by the user space program.
120 two possible approaches: reactively install flows as they miss the kernel
130 The behavior when using overlapping wildcarded flows is undefined. It is the
133 performs best-effort detection of overlapping wildcarded flows and may reject
146 future operations. The kernel is not required to index flows by the original
pktgen.rst
97 flows: 0 flowlen: 0
112 flows: 0
285 pgset "flows 1"
379 flows
/Linux-v5.15/Documentation/admin-guide/pm/
system-wide.rst
11 suspend-flows
/Linux-v5.15/net/core/
pktgen.c
414 struct flow_state *flows; member
2308 return !!(pkt_dev->flows[flow].flags & F_INIT); in f_seen()
2316 if (pkt_dev->flows[flow].count >= pkt_dev->lflow) { in f_pick()
2318 pkt_dev->flows[flow].count = 0; in f_pick()
2319 pkt_dev->flows[flow].flags = 0; in f_pick()
2328 if (pkt_dev->flows[flow].count > pkt_dev->lflow) { in f_pick()
2329 pkt_dev->flows[flow].count = 0; in f_pick()
2330 pkt_dev->flows[flow].flags = 0; in f_pick()
2345 struct xfrm_state *x = pkt_dev->flows[flow].x; in get_ipsec_sa()
2364 pkt_dev->flows[flow].x = x; in get_ipsec_sa()
[all …]
/Linux-v5.15/Documentation/admin-guide/blockdev/drbd/
figures.rst
5 Data flows that Relate some functions, and write packets
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/en/
tc_tun_encap.c
240 list_for_each_entry(efi, &e->flows, list) { in mlx5e_take_all_encap_flows()
361 list_for_each_entry_safe(efi, tmp, &e->flows, list) { in mlx5e_tc_update_neigh_used_value()
406 WARN_ON(!list_empty(&e->flows)); in mlx5e_encap_dealloc()
423 WARN_ON(!list_empty(&d->flows)); in mlx5e_decap_dealloc()
787 INIT_LIST_HEAD(&e->flows); in mlx5e_attach_encap()
813 list_add(&flow->encaps[out_index].list, &e->flows); in mlx5e_attach_encap()
884 INIT_LIST_HEAD(&d->flows); in mlx5e_attach_decap()
907 list_add(&flow->l3_to_l2_reformat, &d->flows); in mlx5e_attach_decap()
/Linux-v5.15/net/dccp/ccids/
Kconfig
20 be reasonably fair when competing for bandwidth with TCP-like flows,
