Lines matching "flow" (full-text search) in net/sched/sch_fq_codel.c

From the comment block at the top of the file:

  33   * Each flow has a CoDel managed queue.
  37   * For a given flow, packets are not reordered (CoDel uses a FIFO)
  40   * Low memory footprint (64 bytes per flow)
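
The per-flow state behind these claims can be sketched from the matched lines alone: a head/tail FIFO, a list hook for the new/old flow rounds, a DRR deficit, and embedded CoDel state. The declaration below is a reconstruction for orientation, not a verbatim copy of the tree:

    struct fq_codel_flow {
            struct sk_buff    *head;       /* oldest queued packet (FIFO head) */
            struct sk_buff    *tail;       /* newest queued packet (tail adds) */
            struct list_head  flowchain;   /* links flow into new_flows/old_flows */
            int               deficit;     /* DRR byte credit, refilled with quantum */
            struct codel_vars cvars;       /* per-flow CoDel state */
    };  /* kept small on purpose: the header comment promises 64 bytes per flow */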

In dequeue_head() ("flow" is the argument):

 117  static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
 119          struct sk_buff *skb = flow->head;
 121          flow->head = skb->next;
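
The lines between and after these fragments contain no "flow" token and so are absent from the listing; restored, the helper plausibly reads (a reconstruction around the matched fragments):

    static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
    {
            struct sk_buff *skb = flow->head;

            /* unlink the head packet and hand it to the caller */
            flow->head = skb->next;
            skb->next = NULL;
            return skb;
    }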

In flow_queue_add() ("flow" is the argument):

 126  /* add skb to flow queue (tail add) */
 127  static inline void flow_queue_add(struct fq_codel_flow *flow,
 130          if (flow->head == NULL)
 131                  flow->head = skb;
 133          flow->tail->next = skb;
 134          flow->tail = skb;
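
Line 132, presumably a bare else, did not match; with the skipped lines restored the helper is (again a reconstruction):

    static inline void flow_queue_add(struct fq_codel_flow *flow,
                                      struct sk_buff *skb)
    {
            if (flow->head == NULL)
                    flow->head = skb;        /* queue was empty */
            else
                    flow->tail->next = skb;  /* append after current tail */
            flow->tail = skb;
            skb->next = NULL;
    }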

In fq_codel_drop() ("flow" is a local variable):

 144          struct fq_codel_flow *flow;
 148          /* Queue is full! Find the fat flow and drop packet(s) from it.
 152           * In stress mode, we'll try to drop 64 packets from the flow,
 162          /* Our goal is to drop half of this fat flow backlog */
 165          flow = &q->flows[idx];
 169                  skb = dequeue_head(flow);
 176          flow->cvars.count += i;
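
Pieced together, these fragments give the drop path its shape: scan the per-flow byte backlogs for the fattest flow, then pull packets off its head until half of that backlog is gone. A sketch under that reading, with names such as maxbacklog, threshold and max_packets assumed rather than quoted:

    unsigned int maxbacklog = 0, threshold, idx = 0, len = 0, i;

    /* find the flow with the largest byte backlog */
    for (i = 0; i < q->flows_cnt; i++) {
            if (q->backlogs[i] > maxbacklog) {
                    maxbacklog = q->backlogs[i];
                    idx = i;
            }
    }
    threshold = maxbacklog >> 1;     /* drop half of the fat flow's backlog */

    flow = &q->flows[idx];
    i = 0;
    do {
            skb = dequeue_head(flow);
            len += qdisc_pkt_len(skb);
            __qdisc_drop(skb, to_free);
    } while (++i < max_packets && len < threshold);

    flow->cvars.count += i;          /* let CoDel raise its signal strength too */
    q->backlogs[idx] -= len;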

In fq_codel_enqueue() ("flow" is a local variable):

 190          struct fq_codel_flow *flow;
 205          flow = &q->flows[idx];
 206          flow_queue_add(flow, skb);
 210          if (list_empty(&flow->flowchain)) {
 211                  list_add_tail(&flow->flowchain, &q->new_flows);
 213                  flow->deficit = q->quantum;
 227           * in q->backlogs[] to find a fat flow.
 240           * If we dropped a packet for this flow, return NET_XMIT_CN,
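
In outline, the enqueue path hashes the packet to a flow, tail-adds it, promotes an idle flow to new_flows with a fresh quantum, and only when the qdisc is over its limit pays for the linear backlog scan via fq_codel_drop(). A condensed sketch (classification, statistics and memory accounting elided; the NET_XMIT_CN test is paraphrased):

    flow = &q->flows[idx];           /* idx comes from hashing/classifying skb */
    flow_queue_add(flow, skb);
    q->backlogs[idx] += qdisc_pkt_len(skb);

    if (list_empty(&flow->flowchain)) {
            /* flow was idle: give it new-flow priority and a fresh deficit */
            list_add_tail(&flow->flowchain, &q->new_flows);
            flow->deficit = q->quantum;
    }
    if (++sch->q.qlen <= sch->limit)
            return NET_XMIT_SUCCESS;

    /* over limit: linear search in q->backlogs[] for a fat flow, shed from it */
    ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

    /* if the trimmed flow was our own, signal congestion to the caller */
    return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;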

In dequeue_func() ("flow" is a local variable):

 260          struct fq_codel_flow *flow;
 263          flow = container_of(vars, struct fq_codel_flow, cvars);
 264          if (flow->head) {
 265                  skb = dequeue_head(flow);
 266                  q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
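
Two details here are easy to miss: codel_dequeue() hands the callback only a codel_vars pointer, so container_of() recovers the enclosing flow, and the pointer difference flow - q->flows recovers the flow's index into the parallel backlogs[] array. A fleshed-out sketch of the callback, with the surrounding qdisc/stats plumbing assumed:

    static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
    {
            struct Qdisc *sch = ctx;
            struct fq_codel_sched_data *q = qdisc_priv(sch);
            struct fq_codel_flow *flow;
            struct sk_buff *skb = NULL;

            /* vars is embedded in the flow, so step back out to the flow */
            flow = container_of(vars, struct fq_codel_flow, cvars);
            if (flow->head) {
                    skb = dequeue_head(flow);
                    /* flow - q->flows is this flow's index in the array */
                    q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                    sch->q.qlen--;
            }
            return skb;
    }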

In fq_codel_dequeue() ("flow" is a local variable):

 286          struct fq_codel_flow *flow;
 296          flow = list_first_entry(head, struct fq_codel_flow, flowchain);
 298          if (flow->deficit <= 0) {
 299                  flow->deficit += q->quantum;
 300                  list_move_tail(&flow->flowchain, &q->old_flows);
 305                              &flow->cvars, &q->cstats, qdisc_pkt_len,
 311                  list_move_tail(&flow->flowchain, &q->old_flows);
 313                  list_del_init(&flow->flowchain);
 317          flow->deficit -= qdisc_pkt_len(skb);
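
These fragments are the deficit-round-robin core of the dequeue path. Assembled into a sketch (the codel_dequeue() call is filled in with the argument list used by recent kernels and should be treated as an assumption):

    begin:
            head = &q->new_flows;
            if (list_empty(head)) {
                    head = &q->old_flows;
                    if (list_empty(head))
                            return NULL;            /* nothing queued at all */
            }
            flow = list_first_entry(head, struct fq_codel_flow, flowchain);

            if (flow->deficit <= 0) {
                    /* out of byte credit: refill and rotate to the old list */
                    flow->deficit += q->quantum;
                    list_move_tail(&flow->flowchain, &q->old_flows);
                    goto begin;
            }

            skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
                                &flow->cvars, &q->cstats, qdisc_pkt_len,
                                codel_get_enqueue_time, drop_func, dequeue_func);
            if (!skb) {
                    /* flow drained: demote a new flow once, else drop it from the round */
                    if (head == &q->new_flows && !list_empty(&q->old_flows))
                            list_move_tail(&flow->flowchain, &q->old_flows);
                    else
                            list_del_init(&flow->flowchain);
                    goto begin;
            }
            flow->deficit -= qdisc_pkt_len(skb);    /* charge the packet's bytes */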

In fq_codel_flow_purge() ("flow" is the argument):

 330  static void fq_codel_flow_purge(struct fq_codel_flow *flow)
 332          rtnl_kfree_skbs(flow->head, flow->tail);
 333          flow->head = NULL;

In fq_codel_reset() ("flow" is a local variable):

 344          struct fq_codel_flow *flow = q->flows + i;
 346          fq_codel_flow_purge(flow);
 347          INIT_LIST_HEAD(&flow->flowchain);
 348          codel_vars_init(&flow->cvars);

In fq_codel_init() ("flow" is a local variable):

 493          struct fq_codel_flow *flow = q->flows + i;
 495          INIT_LIST_HEAD(&flow->flowchain);
 496          codel_vars_init(&flow->cvars);
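
Reset and init walk every allocated flow with nearly the same per-flow setup; reset additionally purges queued packets first. The common loop, roughly:

    for (i = 0; i < q->flows_cnt; i++) {
            struct fq_codel_flow *flow = q->flows + i;

            INIT_LIST_HEAD(&flow->flowchain);   /* flow starts detached */
            codel_vars_init(&flow->cvars);      /* zeroed CoDel state */
    }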

In fq_codel_dump_class_stats() ("flow" is a local variable):

 624          const struct fq_codel_flow *flow = &q->flows[idx];
 629          xstats.class_stats.deficit = flow->deficit;
 631                  codel_time_to_us(flow->cvars.ldelay);
 632          xstats.class_stats.count = flow->cvars.count;
 633          xstats.class_stats.lastcount = flow->cvars.lastcount;
 634          xstats.class_stats.dropping = flow->cvars.dropping;
 635          if (flow->cvars.dropping) {
 636                  codel_tdiff_t delta = flow->cvars.drop_next -
 643          if (flow->head) {
 645                  skb = flow->head;
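
The final two fragments begin a walk of the flow's FIFO to report its queue length under the qdisc tree lock; completed, that tail plausibly reads (the qs stats struct is assumed from context):

    if (flow->head) {
            sch_tree_lock(sch);
            for (skb = flow->head; skb; skb = skb->next)
                    qs.qlen++;                  /* count packets still queued */
            sch_tree_unlock(sch);
    }
    qs.backlog = q->backlogs[idx];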