Lines Matching full:flow (net/sched/sch_fq_codel.c)
32 * Each flow has a CoDel managed queue.
36 * For a given flow, packets are not reordered (CoDel uses a FIFO)
39 * Low memory footprint (64 bytes per flow)
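
The matches below reference a handful of per-flow fields: head, tail, deficit, flowchain, cvars. A minimal plain-C sketch of that record, with hypothetical pkt/flow_sketch types standing in for the kernel's sk_buff and fq_codel_flow (the later sketches on this page reuse these definitions):

    /* Hypothetical stand-ins; the real code uses struct sk_buff,
     * struct list_head and struct codel_vars. */
    struct pkt {
        struct pkt *next;       /* singly linked FIFO chaining */
        unsigned int len;       /* packet length in bytes */
    };

    struct flow_sketch {
        struct pkt *head;       /* oldest packet, dequeued first */
        struct pkt *tail;       /* newest packet; stale when head == NULL */
        int deficit;            /* DRR byte credit */
        /* flowchain linkage and CoDel state omitted */
    };

Keeping the record this small is what makes the "64 bytes per flow" footprint above plausible.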
116 static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow) in dequeue_head() argument
118 struct sk_buff *skb = flow->head; in dequeue_head()
120 flow->head = skb->next; in dequeue_head()
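
Only part of dequeue_head() is matched above; a complete FIFO pop over the stand-in types would plausibly look like:

    /* FIFO pop, mirroring dequeue_head(): unlink the head packet.
     * Caller must ensure the queue is non-empty (head != NULL). */
    static struct pkt *flow_pop(struct flow_sketch *flow)
    {
        struct pkt *p = flow->head;

        flow->head = p->next;
        p->next = NULL;     /* detach so the packet carries no stale link */
        return p;
    }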
125 /* add skb to flow queue (tail add) */
126 static inline void flow_queue_add(struct fq_codel_flow *flow, in flow_queue_add() argument
129 if (flow->head == NULL) in flow_queue_add()
130 flow->head = skb; in flow_queue_add()
132 flow->tail->next = skb; in flow_queue_add()
133 flow->tail = skb; in flow_queue_add()
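
And the matching tail add. Note the head == NULL test on line 129: emptiness is tracked by head alone, so tail may go stale when the queue drains and is simply overwritten on the next add. A sketch:

    /* Tail add, mirroring flow_queue_add(). */
    static void flow_push(struct flow_sketch *flow, struct pkt *p)
    {
        p->next = NULL;
        if (flow->head == NULL)
            flow->head = p;         /* queue was empty; tail was stale */
        else
            flow->tail->next = p;
        flow->tail = p;
    }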
143 struct fq_codel_flow *flow; in fq_codel_drop() local
147 /* Queue is full! Find the fat flow and drop packet(s) from it. in fq_codel_drop()
151 * In stress mode, we'll try to drop 64 packets from the flow, in fq_codel_drop()
161 /* Our goal is to drop half of this fat flow backlog */ in fq_codel_drop()
164 flow = &q->flows[idx]; in fq_codel_drop()
168 skb = dequeue_head(flow); in fq_codel_drop()
175 flow->cvars.count += i; in fq_codel_drop()
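
The overflow path above scans for the fattest flow and trims it. A sketch of that logic, using the stand-in types; the full in-tree comment motivates the linear scan as cache-friendly compared to keeping a heap on the hot enqueue path:

    /* Overflow handling, mirroring fq_codel_drop(): scan the per-flow
     * byte backlogs for the fattest flow, then drop from its head until
     * about half its backlog is freed, capped at max_packets (64 in
     * stress mode). Precondition: the fattest flow is non-empty, which
     * holds whenever the qdisc limit was hit. */
    static unsigned int drop_from_fattest(struct flow_sketch *flows,
                                          unsigned int *backlogs,
                                          unsigned int nflows,
                                          unsigned int max_packets)
    {
        unsigned int i, idx = 0, maxbacklog = 0, len = 0;

        for (i = 0; i < nflows; i++) {
            if (backlogs[i] > maxbacklog) {
                maxbacklog = backlogs[i];
                idx = i;
            }
        }
        i = 0;
        do {
            struct pkt *p = flow_pop(&flows[idx]);

            len += p->len;
            free(p);    /* stand-in for __qdisc_drop(); needs <stdlib.h> */
        } while (++i < max_packets && len < maxbacklog / 2);
        backlogs[idx] -= len;
        /* the real code also does flow->cvars.count += i, as matched
         * above, so CoDel raises its drop signal on this flow */
        return idx;
    }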
189 struct fq_codel_flow *flow; in fq_codel_enqueue() local
204 flow = &q->flows[idx]; in fq_codel_enqueue()
205 flow_queue_add(flow, skb); in fq_codel_enqueue()
209 if (list_empty(&flow->flowchain)) { in fq_codel_enqueue()
210 list_add_tail(&flow->flowchain, &q->new_flows); in fq_codel_enqueue()
212 flow->deficit = q->quantum; in fq_codel_enqueue()
226 * in q->backlogs[] to find a fat flow. in fq_codel_enqueue()
239 * If we dropped a packet for this flow, return NET_XMIT_CN, in fq_codel_enqueue()
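
On the enqueue side, a sketch of the flow-activation logic using the stand-in types; was_idle here only approximates the real list_empty(&flow->flowchain) test, which tracks scheduling-list membership rather than queue emptiness:

    static int enqueue_sketch(struct flow_sketch *flows, unsigned int idx,
                              struct pkt *p, int quantum)
    {
        struct flow_sketch *flow = &flows[idx];
        int was_idle = (flow->head == NULL);    /* approximation, see above */

        flow_push(flow, p);
        if (was_idle) {
            /* real code: list_add_tail(&flow->flowchain, &q->new_flows) */
            flow->deficit = quantum;    /* fresh byte credit for a new flow */
        }
        return was_idle;
    }

When the qdisc overflows, the real enqueue calls the drop routine sketched above and, per the comment at line 239, returns NET_XMIT_CN rather than success if the dropped packets came from this very flow.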
259 struct fq_codel_flow *flow; in dequeue_func() local
262 flow = container_of(vars, struct fq_codel_flow, cvars); in dequeue_func()
263 if (flow->head) { in dequeue_func()
264 skb = dequeue_head(flow); in dequeue_func()
265 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); in dequeue_func()
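
dequeue_func() is the callback CoDel uses to pull packets. It only receives a pointer to the flow's embedded codel_vars, so container_of() recovers the enclosing flow, and pointer arithmetic (flow - q->flows) recovers the index into backlogs[]. A self-contained sketch of both tricks with hypothetical types:

    #include <stddef.h>

    struct cvars_sketch { int count; };     /* stand-in for codel_vars */
    struct flow2 {
        struct pkt *head;
        struct cvars_sketch cvars;          /* embedded, as in the real flow */
    };

    /* same layout trick as the kernel's container_of() */
    #define container_of_sketch(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct flow2 *flow_from_cvars(struct cvars_sketch *vars)
    {
        return container_of_sketch(vars, struct flow2, cvars);
    }

    /* index for q->backlogs[flow - q->flows] */
    static long flow_index(const struct flow2 *flow, const struct flow2 *base)
    {
        return flow - base;
    }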
285 struct fq_codel_flow *flow; in fq_codel_dequeue() local
295 flow = list_first_entry(head, struct fq_codel_flow, flowchain); in fq_codel_dequeue()
297 if (flow->deficit <= 0) { in fq_codel_dequeue()
298 flow->deficit += q->quantum; in fq_codel_dequeue()
299 list_move_tail(&flow->flowchain, &q->old_flows); in fq_codel_dequeue()
304 &flow->cvars, &q->cstats, qdisc_pkt_len, in fq_codel_dequeue()
310 list_move_tail(&flow->flowchain, &q->old_flows); in fq_codel_dequeue()
312 list_del_init(&flow->flowchain); in fq_codel_dequeue()
316 flow->deficit -= qdisc_pkt_len(skb); in fq_codel_dequeue()
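
The dequeue loop is deficit round robin: new_flows is served first, flows out of credit are topped up and rotated to old_flows, drained flows leave the lists, and each sent packet is charged against the flow's byte deficit. A sketch of one scheduling decision with the stand-in types; the list moves are left to the caller because the sketch has no list_head:

    enum drr_verdict { DRR_ROTATE, DRR_UNLINK, DRR_SEND };

    static enum drr_verdict drr_step(struct flow_sketch *flow, int quantum,
                                     struct pkt **out)
    {
        *out = NULL;
        if (flow->deficit <= 0) {
            flow->deficit += quantum;   /* top up, go to old_flows' tail */
            return DRR_ROTATE;
        }
        if (flow->head == NULL)         /* drained: leave the lists */
            return DRR_UNLINK;          /* (a drained new flow is rotated
                                         * to old_flows once in the real code) */
        /* the real code pulls via codel_dequeue(), which may drop stale
         * head packets before handing one back */
        *out = flow_pop(flow);
        flow->deficit -= (int)(*out)->len;  /* charge the bytes sent */
        return DRR_SEND;
    }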
329 static void fq_codel_flow_purge(struct fq_codel_flow *flow) in fq_codel_flow_purge() argument
331 rtnl_kfree_skbs(flow->head, flow->tail); in fq_codel_flow_purge()
332 flow->head = NULL; in fq_codel_flow_purge()
343 struct fq_codel_flow *flow = q->flows + i; in fq_codel_reset() local
345 fq_codel_flow_purge(flow); in fq_codel_reset()
346 INIT_LIST_HEAD(&flow->flowchain); in fq_codel_reset()
347 codel_vars_init(&flow->cvars); in fq_codel_reset()
502 struct fq_codel_flow *flow = q->flows + i; in fq_codel_init() local
504 INIT_LIST_HEAD(&flow->flowchain); in fq_codel_init()
505 codel_vars_init(&flow->cvars); in fq_codel_init()
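
Teardown and setup share the per-flow reset. A sketch, noting that the real purge frees the whole skb chain with a single rtnl_kfree_skbs(head, tail) call rather than a loop:

    /* Mirrors fq_codel_flow_purge() plus the reset/init loops above. */
    static void flow_purge_sketch(struct flow_sketch *flow)
    {
        while (flow->head)
            free(flow_pop(flow));   /* stand-in for rtnl_kfree_skbs() */
        flow->deficit = 0;
        /* fq_codel_reset()/fq_codel_init() additionally run
         * INIT_LIST_HEAD(&flow->flowchain) and codel_vars_init() */
    }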
638 const struct fq_codel_flow *flow = &q->flows[idx]; in fq_codel_dump_class_stats() local
643 xstats.class_stats.deficit = flow->deficit; in fq_codel_dump_class_stats()
645 codel_time_to_us(flow->cvars.ldelay); in fq_codel_dump_class_stats()
646 xstats.class_stats.count = flow->cvars.count; in fq_codel_dump_class_stats()
647 xstats.class_stats.lastcount = flow->cvars.lastcount; in fq_codel_dump_class_stats()
648 xstats.class_stats.dropping = flow->cvars.dropping; in fq_codel_dump_class_stats()
649 if (flow->cvars.dropping) { in fq_codel_dump_class_stats()
650 codel_tdiff_t delta = flow->cvars.drop_next - in fq_codel_dump_class_stats()
657 if (flow->head) { in fq_codel_dump_class_stats()
659 skb = flow->head; in fq_codel_dump_class_stats()
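
Finally, the class-stats dump: most fields are copied straight from cvars and the byte backlog comes from backlogs[idx], but there is no cached per-flow packet count, so the matched lines grab flow->head and walk the FIFO under the qdisc tree lock. A sketch of that count:

    static unsigned int flow_qlen(const struct flow_sketch *flow)
    {
        const struct pkt *p;
        unsigned int n = 0;

        for (p = flow->head; p; p = p->next)
            n++;
        return n;
    }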